# ceilometer-6.1.5/ceilometer/neutron_client.py

# Copyright (C) 2014 eNovance SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from neutronclient.common import exceptions
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log

from ceilometer import keystone_client

SERVICE_OPTS = [
    cfg.StrOpt('neutron',
               default='network',
               help='Neutron service type.'),
    cfg.StrOpt('neutron_lbaas_version',
               default='v2',
               choices=('v1', 'v2'),
               help='Neutron load balancer version.')
]

cfg.CONF.register_opts(SERVICE_OPTS, group='service_types')
cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client')

LOG = log.getLogger(__name__)


def logged(func):

    @functools.wraps(func)
    def with_logging(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except exceptions.NeutronClientException as e:
            if e.status_code == 404:
                LOG.warning("The resource could not be found.")
            else:
                LOG.warning(e)
            return []
        except Exception as e:
            LOG.exception(e)
            raise

    return with_logging


class Client(object):
    """A client which gets information via python-neutronclient."""

    def __init__(self):
        conf = cfg.CONF.service_credentials
        params = {
            'session': keystone_client.get_session(),
            'endpoint_type': conf.interface,
            'region_name': conf.region_name,
            'service_type': cfg.CONF.service_types.neutron,
        }
        self.client = clientv20.Client(**params)
        self.lb_version = cfg.CONF.service_types.neutron_lbaas_version

    @logged
    def port_get_all(self):
        resp = self.client.list_ports()
        return resp.get('ports')

    @logged
    def vip_get_all(self):
        resp = self.client.list_vips()
        return resp.get('vips')

    @logged
    def pool_get_all(self):
        resources = []
        if self.lb_version == 'v1':
            resp = self.client.list_pools()
            resources = resp.get('pools')
        elif self.lb_version == 'v2':
            resources = self.list_pools_v2()
        return resources

    @logged
    def member_get_all(self):
        resources = []
        if self.lb_version == 'v1':
            resp = self.client.list_members()
            resources = resp.get('members')
        elif self.lb_version == 'v2':
            resources = self.list_members_v2()
        return resources

    @logged
    def health_monitor_get_all(self):
        resources = []
        if self.lb_version == 'v1':
            resp = self.client.list_health_monitors()
            resources = resp.get('health_monitors')
        elif self.lb_version == 'v2':
            resources = self.list_health_monitors_v2()
        return resources

    @logged
    def pool_stats(self, pool):
        return self.client.retrieve_pool_stats(pool)

    @logged
    def vpn_get_all(self):
        resp = self.client.list_vpnservices()
        return resp.get('vpnservices')

    @logged
    def ipsec_site_connections_get_all(self):
        resp = self.client.list_ipsec_site_connections()
        return resp.get('ipsec_site_connections')

    @logged
    def firewall_get_all(self):
        resp = self.client.list_firewalls()
        return resp.get('firewalls')

    @logged
    def fw_policy_get_all(self):
        resp = self.client.list_firewall_policies()
        return resp.get('firewall_policies')

    @logged
    def fip_get_all(self):
        fips = self.client.list_floatingips()['floatingips']
        return fips

    @logged
    def list_pools_v2(self):
        """Get a detailed list of pools.

        This method uses the Load Balancer v2_0 API to fetch the
        detailed list of the pools.

        :returns: The list of the pool resources
        """
        pool_status = dict()
        resp = self.client.list_lbaas_pools()
        temp_pools = resp.get('pools')
        resources = []
        pool_listener_dict = self._get_pool_and_listener_ids(temp_pools)
        for k, v in pool_listener_dict.items():
            loadbalancer_id = self._get_loadbalancer_id_with_listener_id(v)
            status = self._get_pool_status(loadbalancer_id, v)
            for k, v in status.items():
                pool_status[k] = v

        for pool in temp_pools:
            pool_id = pool.get('id')
            pool['status'] = pool_status[pool_id]
            pool['lb_method'] = pool.get('lb_algorithm')
            pool['status_description'] = pool['status']

            # Based on the LBaaSv2 design, the properties 'vip_id'
            # and 'subnet_id' should belong to the loadbalancer resource
            # and not to the pool resource. However, because we don't
            # want to change the metadata of the pool resource in this
            # release, we set them to empty values manually.
            pool['provider'] = ''
            pool['vip_id'] = ''
            pool['subnet_id'] = ''
            resources.append(pool)

        return resources

    @logged
    def list_members_v2(self):
        """Get a detailed list of members.

        This method uses the Load Balancer v2_0 API to fetch the
        detailed list of the members.

        :returns: The list of the member resources
        """
        resources = []
        pools = self.client.list_lbaas_pools().get('pools')
        for pool in pools:
            pool_id = pool.get('id')
            listener_id = pool.get('listeners')[0].get('id')
            lb_id = self._get_loadbalancer_id_with_listener_id(listener_id)
            status = self._get_member_status(lb_id, [listener_id, pool_id])
            resp = self.client.list_lbaas_members(pool_id)
            temp_members = resp.get('members')
            for member in temp_members:
                member['status'] = status[member.get('id')]
                member['pool_id'] = pool_id
                member['status_description'] = member['status']
                resources.append(member)
        return resources

    @logged
    def list_health_monitors_v2(self):
        """Get a detailed list of health monitors.

        This method uses the Load Balancer v2_0 API to fetch the
        detailed list of the health monitors.

        :returns: The list of the health monitor resources
        """
        resp = self.client.list_lbaas_healthmonitors()
        resources = resp.get('healthmonitors')
        return resources

    def _get_pool_and_listener_ids(self, pools):
        """Get the mapping between pools and listeners.

        This method extracts the pool ids and listener ids from the
        given pool list.

        :param pools: The list of the pools
        :returns: The relationship between pools and listeners as a
                  dictionary: the key is the id of a pool and the value
                  is the id of the first listener the pool belongs to
        """
        pool_listener_dict = dict()
        for pool in pools:
            key = pool.get("id")
            value = pool.get('listeners')[0].get('id')
            pool_listener_dict[key] = value
        return pool_listener_dict

    def _retrieve_loadbalancer_status_tree(self, loadbalancer_id):
        """Get the status tree of a specific load balancer.

        :param loadbalancer_id: The ID of the specific load balancer.
        :returns: The status of the load balancer.
                  It consists of the load balancer and all of its
                  children's provisioning and operating statuses.
        """
        lb_status_tree = self.client.retrieve_loadbalancer_status(
            loadbalancer_id)
        return lb_status_tree

    def _get_loadbalancer_id_with_listener_id(self, listener_id):
        """Get the loadbalancer id for a listener.

        :param listener_id: The ID of the listener
        :returns: The ID of the loadbalancer
        """
        listener = self.client.show_listener(listener_id)
        listener_lbs = listener.get('listener').get('loadbalancers')
        loadbalancer_id = listener_lbs[0].get('id')
        return loadbalancer_id

    def _get_member_status(self, loadbalancer_id, parent_id):
        """Get the status of the member resources.

        This method gets the status of the member resources belonging
        to the specified load balancer.

        :param loadbalancer_id: The ID of the load balancer.
        :param parent_id: The parent ID list of the member resource.
                          For a member resource, the parent_id should be
                          [listener_id, pool_id].
        :returns: The status dictionary of the member resources. The key
                  is the ID of a member; the value is its operating
                  status.
        """
        # FIXME(liamji) the following meters are experimental and
        # may generate a large load against the neutron api. Future
        # enhancements can be tracked against:
        # https://review.openstack.org/#/c/218560.
        # After it has been merged and the neutron client supports
        # the corresponding apis, this will be changed to use the new
        # method to get the status of the members.
        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
        status_tree = resp.get('statuses').get('loadbalancer')

        status_dict = dict()
        listeners_status = status_tree.get('listeners')
        for listener_status in listeners_status:
            listener_id = listener_status.get('id')
            if listener_id == parent_id[0]:
                pools_status = listener_status.get('pools')
                for pool_status in pools_status:
                    if pool_status.get('id') == parent_id[1]:
                        members_status = pool_status.get('members')
                        for member_status in members_status:
                            key = member_status.get('id')
                            # Skip any item that has no 'id' property.
                            if key is None:
                                continue
                            # The case where the property
                            # 'operating_status' is None is handled in
                            # the method get_sample() in lbaas.py.
                            value = member_status.get('operating_status')
                            status_dict[key] = value
                        break
                break

        return status_dict

    def _get_listener_status(self, loadbalancer_id):
        """Get the status of the listener resources.

        This method gets the status of the listener resources belonging
        to the specified load balancer.

        :param loadbalancer_id: The ID of the load balancer.
        :returns: The status dictionary of the listener resources. The
                  key is the ID of a listener; the value is its
                  operating status.
        """
        # FIXME(liamji) the following meters are experimental and
        # may generate a large load against the neutron api. Future
        # enhancements can be tracked against:
        # https://review.openstack.org/#/c/218560.
        # After it has been merged and the neutron client supports
        # the corresponding apis, this will be changed to use the new
        # method to get the status of the listeners.
        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
        status_tree = resp.get('statuses').get('loadbalancer')

        status_dict = dict()
        listeners_status = status_tree.get('listeners')
        for listener_status in listeners_status:
            key = listener_status.get('id')
            # Skip any item that has no 'id' property.
            if key is None:
                continue
            # The case where the property 'operating_status' is None is
            # handled in the method get_sample() in lbaas.py.
            value = listener_status.get('operating_status')
            status_dict[key] = value

        return status_dict

    def _get_pool_status(self, loadbalancer_id, parent_id):
        """Get the status of the pool resources.

        This method gets the status of the pool resources belonging to
        the specified load balancer.

        :param loadbalancer_id: The ID of the load balancer.
        :param parent_id: The parent (listener) ID of the pool resources.
        :returns: The status dictionary of the pool resources. The key
                  is the ID of a pool; the value is its operating
                  status.
        """
        # FIXME(liamji) the following meters are experimental and
        # may generate a large load against the neutron api. Future
        # enhancements can be tracked against:
        # https://review.openstack.org/#/c/218560.
        # After it has been merged and the neutron client supports
        # the corresponding apis, this will be changed to use the new
        # method to get the status of the pools.
        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
        status_tree = resp.get('statuses').get('loadbalancer')

        status_dict = dict()
        listeners_status = status_tree.get('listeners')
        for listener_status in listeners_status:
            listener_id = listener_status.get('id')
            if listener_id == parent_id:
                pools_status = listener_status.get('pools')
                for pool_status in pools_status:
                    key = pool_status.get('id')
                    # Skip any item that has no 'id' property.
                    if key is None:
                        continue
                    # The case where the property 'operating_status' is
                    # None is handled in the method get_sample() in
                    # lbaas.py.
                    value = pool_status.get('operating_status')
                    status_dict[key] = value
                break

        return status_dict

    @logged
    def list_listener(self):
        """Get the list of listeners."""
        resources = []
        if self.lb_version == 'v2':
            # list_listeners works only with the lbaas v2 extension
            resp = self.client.list_listeners()
            resources = resp.get('listeners')
            for listener in resources:
                loadbalancer_id = listener.get('loadbalancers')[0].get('id')
                status = self._get_listener_status(loadbalancer_id)
                listener['operating_status'] = status[listener.get('id')]
        return resources

    @logged
    def list_loadbalancer(self):
        """Get the list of loadbalancers."""
        resources = []
        if self.lb_version == 'v2':
            # list_loadbalancers works only with the lbaas v2 extension
            resp = self.client.list_loadbalancers()
            resources = resp.get('loadbalancers')
        return resources

    @logged
    def get_loadbalancer_stats(self, loadbalancer_id):
        """Get the statistics of a loadbalancer.

        :param loadbalancer_id: the ID of the specified loadbalancer
        """
        resp = self.client.retrieve_loadbalancer_stats(loadbalancer_id)
        resource = resp.get('stats')
        return resource
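
# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the module above, assumes nothing beyond
# the standard library): the @logged decorator turns a neutron 404 (for
# example, an LBaaS/FWaaS extension that is not enabled) into an empty list
# instead of an exception, so pollsters can treat a missing extension like an
# empty resource set. A minimal, self-contained demonstration of that
# behaviour with a stand-in exception class; no live neutron endpoint needed.

import logging as _demo_logging

_DEMO_LOG = _demo_logging.getLogger('logged-demo')


class _FakeNeutronError(Exception):
    """Stand-in for neutronclient's NeutronClientException (hypothetical)."""

    def __init__(self, status_code):
        super(_FakeNeutronError, self).__init__('HTTP %d' % status_code)
        self.status_code = status_code


def _logged_demo(func):
    @functools.wraps(func)
    def with_logging(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except _FakeNeutronError as e:
            if e.status_code == 404:
                _DEMO_LOG.warning("The resource could not be found.")
            else:
                _DEMO_LOG.warning(e)
            return []
    return with_logging


@_logged_demo
def _list_firewalls_demo():
    raise _FakeNeutronError(404)  # e.g. FWaaS extension not enabled

assert _list_firewalls_demo() == []  # the 404 is absorbed, not raised
# ---------------------------------------------------------------------------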
"""Common code for working with images """ from __future__ import absolute_import import glanceclient from oslo_config import cfg from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample OPTS = [ cfg.IntOpt('glance_page_size', default=0, help="Number of items to request in " "each paginated Glance API request " "(parameter used by glancecelient). " "If this is less than or equal to 0, " "page size is not specified " "(default value in glanceclient is used)."), ] SERVICE_OPTS = [ cfg.StrOpt('glance', default='image', help='Glance service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') class _Base(plugin_base.PollsterBase): @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.glance @staticmethod def get_glance_client(ksclient, endpoint): # hard-code v1 glance API version selection while v2 API matures return glanceclient.Client('1', session=keystone_client.get_session(), endpoint=endpoint, auth=ksclient.session.auth) def _get_images(self, ksclient, endpoint): client = self.get_glance_client(ksclient, endpoint) page_size = cfg.CONF.glance_page_size kwargs = {} if page_size > 0: kwargs['page_size'] = page_size return client.images.list(filters={"is_public": None}, **kwargs) def _iter_images(self, ksclient, cache, endpoint): """Iterate over all images.""" key = '%s-images' % endpoint if key not in cache: cache[key] = list(self._get_images(ksclient, endpoint)) return iter(cache[key]) @staticmethod def extract_image_metadata(image): return dict((k, getattr(image, k)) for k in [ "status", "is_public", "name", "deleted", "container_format", "created_at", "disk_format", "updated_at", "properties", "min_disk", "protected", "checksum", "deleted_at", "min_ram", "size", ]) class ImagePollster(_Base): def get_samples(self, manager, cache, resources): for endpoint in resources: for image in self._iter_images(manager.keystone, cache, endpoint): yield sample.Sample( name='image', type=sample.TYPE_GAUGE, unit='image', volume=1, user_id=None, project_id=image.owner, resource_id=image.id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_image_metadata(image), ) class ImageSizePollster(_Base): def get_samples(self, manager, cache, resources): for endpoint in resources: for image in self._iter_images(manager.keystone, cache, endpoint): yield sample.Sample( name='image.size', type=sample.TYPE_GAUGE, unit='B', volume=image.size, user_id=None, project_id=image.owner, resource_id=image.id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_image_metadata(image), ) ceilometer-6.1.5/ceilometer/image/__init__.py0000664000567000056710000000000013072744703022334 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/service_base.py0000664000567000056710000001364113072744706022167 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Hewlett Packard # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 

# ceilometer-6.1.5/ceilometer/image/__init__.py (empty)

# ceilometer-6.1.5/ceilometer/service_base.py

#
# Copyright 2015 Hewlett Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from oslo_config import cfg
from oslo_log import log
from oslo_service import service as os_service
import six

from ceilometer.i18n import _LE, _LI
from ceilometer import pipeline

LOG = log.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class BaseService(os_service.Service):

    def clear_pipeline_validation_status(self):
        """Clears pipeline validation status flags."""
        self.pipeline_validated = False
        self.event_pipeline_validated = False

    def init_pipeline_refresh(self):
        """Initializes pipeline refresh state."""
        self.clear_pipeline_validation_status()
        if cfg.CONF.refresh_pipeline_cfg:
            self.set_pipeline_mtime(pipeline.get_pipeline_mtime())
            self.set_pipeline_hash(pipeline.get_pipeline_hash())

        if cfg.CONF.refresh_event_pipeline_cfg:
            self.set_pipeline_mtime(pipeline.get_pipeline_mtime(
                pipeline.EVENT_TYPE), pipeline.EVENT_TYPE)
            self.set_pipeline_hash(pipeline.get_pipeline_hash(
                pipeline.EVENT_TYPE), pipeline.EVENT_TYPE)

        if (cfg.CONF.refresh_pipeline_cfg or
                cfg.CONF.refresh_event_pipeline_cfg):
            self.tg.add_timer(cfg.CONF.pipeline_polling_interval,
                              self.refresh_pipeline)

    def get_pipeline_mtime(self, p_type=pipeline.SAMPLE_TYPE):
        return (self.event_pipeline_mtime if p_type == pipeline.EVENT_TYPE
                else self.pipeline_mtime)

    def set_pipeline_mtime(self, mtime, p_type=pipeline.SAMPLE_TYPE):
        if p_type == pipeline.EVENT_TYPE:
            self.event_pipeline_mtime = mtime
        else:
            self.pipeline_mtime = mtime

    def get_pipeline_hash(self, p_type=pipeline.SAMPLE_TYPE):
        return (self.event_pipeline_hash if p_type == pipeline.EVENT_TYPE
                else self.pipeline_hash)

    def set_pipeline_hash(self, _hash, p_type=pipeline.SAMPLE_TYPE):
        if p_type == pipeline.EVENT_TYPE:
            self.event_pipeline_hash = _hash
        else:
            self.pipeline_hash = _hash

    @abc.abstractmethod
    def reload_pipeline(self):
        """Reload pipeline in the agents."""

    def pipeline_changed(self, p_type=pipeline.SAMPLE_TYPE):
        """Returns the hash of the changed pipeline, or False."""
        pipeline_mtime = self.get_pipeline_mtime(p_type)
        mtime = pipeline.get_pipeline_mtime(p_type)
        if mtime > pipeline_mtime:
            LOG.info(_LI('Pipeline configuration file has been updated.'))

            self.set_pipeline_mtime(mtime, p_type)
            _hash = pipeline.get_pipeline_hash(p_type)
            pipeline_hash = self.get_pipeline_hash(p_type)
            if _hash != pipeline_hash:
                LOG.info(_LI("Detected change in pipeline configuration."))
                return _hash
        return False

    def refresh_pipeline(self):
        """Refreshes the appropriate pipeline, then delegates to the agent."""
        if cfg.CONF.refresh_pipeline_cfg:
            pipeline_hash = self.pipeline_changed()
            if pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    LOG.debug("Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': self.pipeline_hash,
                               'new': pipeline_hash})
                    self.set_pipeline_hash(pipeline_hash)
                    self.pipeline_validated = True
                except Exception as err:
                    LOG.debug("Active pipeline config's hash is %s",
                              self.pipeline_hash)
                    LOG.exception(_LE('Unable to load changed pipeline: %s')
                                  % err)

        if cfg.CONF.refresh_event_pipeline_cfg:
            ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE)
            if ev_pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'event_pipeline_manager'):
                        self.event_pipeline_manager = (pipeline.
                                                       setup_event_pipeline())

                    LOG.debug("Event pipeline has been refreshed. "
" "old hash: %(old)s, new hash: %(new)s", {'old': self.event_pipeline_hash, 'new': ev_pipeline_hash}) self.set_pipeline_hash(ev_pipeline_hash, pipeline.EVENT_TYPE) self.event_pipeline_validated = True except Exception as err: LOG.debug("Active event pipeline config's hash is %s", self.event_pipeline_hash) LOG.exception(_LE('Unable to load changed event pipeline:' ' %s') % err) if self.pipeline_validated or self.event_pipeline_validated: self.reload_pipeline() self.clear_pipeline_validation_status() ceilometer-6.1.5/ceilometer/meter/0000775000567000056710000000000013072745164020271 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/meter/notifications.py0000664000567000056710000002263413072744706023524 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import pkg_resources import six from debtcollector import moves from oslo_config import cfg from oslo_log import log import oslo_messaging from stevedore import extension from ceilometer.agent import plugin_base from ceilometer import declarative from ceilometer.i18n import _LE, _LW from ceilometer import sample from ceilometer import utils OPTS = [ cfg.StrOpt('meter_definitions_cfg_file', default="meters.yaml", help="Configuration file for defining meter notifications." 

# ceilometer-6.1.5/ceilometer/meter/notifications.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools
import pkg_resources
import six

from debtcollector import moves
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from stevedore import extension

from ceilometer.agent import plugin_base
from ceilometer import declarative
from ceilometer.i18n import _LE, _LW
from ceilometer import sample
from ceilometer import utils

OPTS = [
    cfg.StrOpt('meter_definitions_cfg_file',
               default="meters.yaml",
               help="Configuration file for defining meter notifications."
               ),
]

cfg.CONF.register_opts(OPTS, group='meter')
cfg.CONF.import_opt('disable_non_metric_meters', 'ceilometer.notification',
                    group='notification')

LOG = log.getLogger(__name__)

MeterDefinitionException = moves.moved_class(declarative.DefinitionException,
                                             'MeterDefinitionException',
                                             __name__,
                                             version=6.0,
                                             removal_version="?")


class MeterDefinition(object):

    SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp",
                         "user_id", "project_id", "resource_id"]

    REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume',
                       'resource_id']

    def __init__(self, definition_cfg, plugin_manager):
        self.cfg = definition_cfg
        missing = [field for field in self.REQUIRED_FIELDS
                   if not self.cfg.get(field)]
        if missing:
            raise declarative.DefinitionException(
                _LE("Required fields %s not specified") % missing, self.cfg)

        self._event_type = self.cfg.get('event_type')
        if isinstance(self._event_type, six.string_types):
            self._event_type = [self._event_type]

        if ('type' not in self.cfg.get('lookup', []) and
                self.cfg['type'] not in sample.TYPES):
            raise declarative.DefinitionException(
                _LE("Invalid type %s specified") % self.cfg['type'],
                self.cfg)

        self._fallback_user_id = declarative.Definition(
            'user_id', "_context_user_id|_context_user", plugin_manager)
        self._fallback_project_id = declarative.Definition(
            'project_id', "_context_tenant_id|_context_tenant",
            plugin_manager)
        self._attributes = {}
        self._metadata_attributes = {}

        for name in self.SAMPLE_ATTRIBUTES:
            attr_cfg = self.cfg.get(name)
            if attr_cfg:
                self._attributes[name] = declarative.Definition(
                    name, attr_cfg, plugin_manager)
        metadata = self.cfg.get('metadata', {})
        for name in metadata:
            self._metadata_attributes[name] = declarative.Definition(
                name, metadata[name], plugin_manager)

        # List of fields we expect when multiple meters are in the payload
        self.lookup = self.cfg.get('lookup')
        if isinstance(self.lookup, six.string_types):
            self.lookup = [self.lookup]

    def match_type(self, meter_name):
        for t in self._event_type:
            if utils.match(meter_name, t):
                return True

    def to_samples(self, message, all_values=False):
        # Sample defaults
        sample = {
            'name': self.cfg["name"],
            'type': self.cfg["type"],
            'unit': self.cfg["unit"],
            'volume': None,
            'timestamp': None,
            'user_id': self._fallback_user_id.parse(message),
            'project_id': self._fallback_project_id.parse(message),
            'resource_id': None,
            'message': message,
            'metadata': {},
        }
        for name, parser in self._metadata_attributes.items():
            value = parser.parse(message)
            if value:
                sample['metadata'][name] = value

        # NOTE(sileht): We expect multiple samples in the payload
        # so put each attribute into a list
        if self.lookup:
            for name in sample:
                sample[name] = [sample[name]]

        for name in self.SAMPLE_ATTRIBUTES:
            parser = self._attributes.get(name)
            if parser is not None:
                value = parser.parse(message, bool(self.lookup))
                # NOTE(sileht): If we expect multiple samples, some
                # attributes are overridden even if we don't get any
                # result.
                # Also note that in this case value is always a list.
                if ((not self.lookup and value is not None) or
                        (self.lookup and ((name in self.lookup + ["name"])
                                          or value))):
                    sample[name] = value

        if self.lookup:
            nb_samples = len(sample['name'])
            # skip if no meters in payload
            if nb_samples <= 0:
                raise StopIteration

            attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"]

            samples_values = []
            for name in attributes:
                values = sample.get(name)
                nb_values = len(values)
                if nb_values == nb_samples:
                    samples_values.append(values)
                elif nb_values == 1 and name not in self.lookup:
                    samples_values.append(itertools.cycle(values))
                else:
                    nb = (0 if nb_values == 1 and values[0] is None
                          else nb_values)
                    LOG.warning('Only %(nb)d fetched meters contain '
                                '"%(name)s" field instead of %(total)d.' %
                                dict(name=name, nb=nb, total=nb_samples))
                    raise StopIteration

            # NOTE(sileht): Transform the sample with multiple values per
            # attribute into multiple samples with one value per attribute.
            for values in zip(*samples_values):
                yield dict((attributes[idx], value)
                           for idx, value in enumerate(values))
        else:
            yield sample


class ProcessMeterNotifications(plugin_base.NotificationBase):

    event_types = []

    def __init__(self, manager):
        super(ProcessMeterNotifications, self).__init__(manager)
        self.definitions = self._load_definitions()

    @staticmethod
    def _load_definitions():
        plugin_manager = extension.ExtensionManager(
            namespace='ceilometer.event.trait_plugin')
        meters_cfg = declarative.load_definitions(
            {}, cfg.CONF.meter.meter_definitions_cfg_file,
            pkg_resources.resource_filename(__name__, "data/meters.yaml"))

        definitions = {}
        for meter_cfg in reversed(meters_cfg['metric']):
            if meter_cfg.get('name') in definitions:
                # skip duplicate meters
                LOG.warning(_LW("Skipping duplicate meter definition %s")
                            % meter_cfg)
                continue
            if (meter_cfg.get('volume') != 1
                    or not cfg.CONF.notification.disable_non_metric_meters):
                try:
                    md = MeterDefinition(meter_cfg, plugin_manager)
                except declarative.DefinitionException as me:
                    errmsg = (_LE("Error loading meter definition : %(err)s")
                              % dict(err=six.text_type(me)))
                    LOG.error(errmsg)
                else:
                    definitions[meter_cfg['name']] = md
        return definitions.values()

    def get_targets(self, conf):
        """Return a sequence of oslo_messaging.Target

        It defines the exchanges and topics to be connected to for this
        plugin.

        :param conf: Configuration.
        #TODO(prad): This should be defined in the notification agent
        """
        targets = []
        exchanges = [
            conf.nova_control_exchange,
            conf.cinder_control_exchange,
            conf.glance_control_exchange,
            conf.neutron_control_exchange,
            conf.heat_control_exchange,
            conf.keystone_control_exchange,
            conf.sahara_control_exchange,
            conf.trove_control_exchange,
            conf.zaqar_control_exchange,
            conf.swift_control_exchange,
            conf.magnetodb_control_exchange,
            conf.ceilometer_control_exchange,
            conf.magnum_control_exchange,
            conf.dns_control_exchange,
        ]

        for exchange in exchanges:
            targets.extend(oslo_messaging.Target(topic=topic,
                                                 exchange=exchange)
                           for topic in
                           self.get_notification_topics(conf))
        return targets

    def process_notification(self, notification_body):
        for d in self.definitions:
            if d.match_type(notification_body['event_type']):
                for s in d.to_samples(notification_body):
                    yield sample.Sample.from_notification(**s)
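
# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the module above): the shape of the
# transformation that process_notification() drives. The field paths follow
# the 'image.download' definition in data/meters.yaml below; the notification
# payload here is hypothetical, and the real JSONPath evaluation is done by
# ceilometer.declarative rather than the literal dict lookups used here.

_demo_notification = {
    'event_type': 'image.send',
    'payload': {'bytes_sent': 42,
                'image_id': 'img-1',
                'receiver_user_id': 'u1',
                'receiver_tenant_id': 'p1'},
}
_demo_sample = {
    'name': 'image.download',
    'type': 'delta',
    'unit': 'B',
    # volume: $.payload.bytes_sent
    'volume': _demo_notification['payload']['bytes_sent'],
    # resource_id: $.payload.image_id
    'resource_id': _demo_notification['payload']['image_id'],
    # user_id: $.payload.receiver_user_id
    'user_id': _demo_notification['payload']['receiver_user_id'],
}
assert _demo_sample['volume'] == 42
# ---------------------------------------------------------------------------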

# ceilometer-6.1.5/ceilometer/meter/data/meters.yaml

---
metric:
  # Image
  - name: "image.size"
    event_type:
      - "image.upload"
      - "image.delete"
      - "image.update"
    type: "gauge"
    unit: B
    volume: $.payload.size
    resource_id: $.payload.id
    project_id: $.payload.owner

  - name: "image.download"
    event_type: "image.send"
    type: "delta"
    unit: "B"
    volume: $.payload.bytes_sent
    resource_id: $.payload.image_id
    user_id: $.payload.receiver_user_id
    project_id: $.payload.receiver_tenant_id

  - name: "image.serve"
    event_type: "image.send"
    type: "delta"
    unit: "B"
    volume: $.payload.bytes_sent
    resource_id: $.payload.image_id
    project_id: $.payload.owner_id

  # MagnetoDB
  - name: 'magnetodb.table.index.count'
    type: 'gauge'
    unit: 'index'
    event_type: 'magnetodb.table.create.end'
    volume: $.payload.index_count
    resource_id: $.payload.table_uuid
    user_id: $._context_user

  - name: 'volume.size'
    event_type:
      - 'volume.exists'
      - 'volume.create.*'
      - 'volume.delete.*'
      - 'volume.resize.*'
      - 'volume.attach.*'
      - 'volume.detach.*'
      - 'volume.update.*'
    type: 'gauge'
    unit: 'GB'
    volume: $.payload.size
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.volume_id

  - name: 'snapshot.size'
    event_type:
      - 'snapshot.exists'
      - 'snapshot.create.*'
      - 'snapshot.delete.*'
    type: 'gauge'
    unit: 'GB'
    volume: $.payload.volume_size
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.snapshot_id

  # Magnum
  - name: $.payload.metrics.[*].name
    event_type: 'magnum.bay.metrics.*'
    type: 'gauge'
    unit: $.payload.metrics.[*].unit
    volume: $.payload.metrics.[*].value
    user_id: $.payload.user_id
    project_id: $.payload.project_id
    resource_id: $.payload.resource_id
    lookup: ['name', 'unit', 'volume']

  # Swift
  - name: $.payload.measurements.[*].metric.[*].name
    event_type: 'objectstore.http.request'
    type: 'delta'
    unit: $.payload.measurements.[*].metric.[*].unit
    volume: $.payload.measurements.[*].result
    resource_id: $.payload.target.id
    user_id: $.payload.initiator.id
    project_id: $.payload.initiator.project_id
    lookup: ['name', 'unit', 'volume']

  - name: 'memory'
    event_type: 'compute.instance.*'
    type: 'gauge'
    unit: 'MB'
    volume: $.payload.memory_mb
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id

  - name: 'vcpus'
    event_type: 'compute.instance.*'
    type: 'gauge'
    unit: 'vcpu'
    volume: $.payload.vcpus
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id

  - name: 'compute.instance.booting.time'
    event_type: 'compute.instance.create.end'
    type: 'gauge'
    unit: 'sec'
    volume:
      fields: [$.payload.created_at, $.payload.launched_at]
      plugin: 'timedelta'
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id

  - name: 'disk.root.size'
    event_type: 'compute.instance.*'
    type: 'gauge'
    unit: 'GB'
    volume: $.payload.root_gb
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id

  - name: 'disk.ephemeral.size'
    event_type: 'compute.instance.*'
    type: 'gauge'
    unit: 'GB'
    volume: $.payload.ephemeral_gb
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id

  - name: 'bandwidth'
    event_type: 'l3.meter'
    type: 'delta'
    unit: 'B'
    volume: $.payload.bytes
    project_id: $.payload.tenant_id
    resource_id: $.payload.label_id

  - name: 'compute.node.cpu.frequency'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'MHz'
    volume: $.payload.metrics[?(@.name='cpu.frequency')].value
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.frequency')].source

  - name: 'compute.node.cpu.user.time'
    event_type: 'compute.metrics.update'
    type: 'cumulative'
    unit: 'ns'
    volume: $.payload.metrics[?(@.name='cpu.user.time')].value
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.user.time')].source

  - name: 'compute.node.cpu.kernel.time'
    event_type: 'compute.metrics.update'
    type: 'cumulative'
    unit: 'ns'
    volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.kernel.time')].source

  - name: 'compute.node.cpu.idle.time'
    event_type: 'compute.metrics.update'
    type: 'cumulative'
    unit: 'ns'
    volume: $.payload.metrics[?(@.name='cpu.idle.time')].value
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.idle.time')].source

  - name: 'compute.node.cpu.iowait.time'
    event_type: 'compute.metrics.update'
    type: 'cumulative'
    unit: 'ns'
    volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.iowait.time')].source

  - name: 'compute.node.cpu.kernel.percent'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'percent'
    volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source

  - name: 'compute.node.cpu.idle.percent'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'percent'
    volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.idle.percent')].source
  - name: 'compute.node.cpu.user.percent'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'percent'
    volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.user.percent')].source

  - name: 'compute.node.cpu.iowait.percent'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'percent'
    volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source

  - name: 'compute.node.cpu.percent'
    event_type: 'compute.metrics.update'
    type: 'gauge'
    unit: 'percent'
    volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100
    resource_id: $.payload.host + "_" + $.payload.nodename
    timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp
    metadata:
      event_type: $.event_type
      host: $.publisher_id
      source: $.payload.metrics[?(@.name='cpu.percent')].source

  # DNS
  - name: 'dns.domain.exists'
    event_type: 'dns.domain.exists'
    type: 'cumulative'
    unit: 's'
    volume:
      fields: [$.payload.audit_period_beginning,
               $.payload.audit_period_ending]
      plugin: 'timedelta'
    project_id: $.payload.tenant_id
    resource_id: $.payload.id
    user_id: $._context_user
    metadata:
      status: $.payload.status
      pool_id: $.payload.pool_id
      host: $.publisher_id

  # Trove
  - name: 'trove.instance.exists'
    event_type: 'trove.instance.exists'
    type: 'cumulative'
    unit: 's'
    volume:
      fields: [$.payload.audit_period_beginning,
               $.payload.audit_period_ending]
      plugin: 'timedelta'
    project_id: $.payload.tenant_id
    resource_id: $.payload.instance_id
    user_id: $.payload.user_id
    metadata:
      nova_instance_id: $.payload.nova_instance_id
      state: $.payload.state
      service_id: $.payload.service_id
      instance_type: $.payload.instance_type
      instance_type_id: $.payload.instance_type_id

  # NOTE: non-metric meters are generally events/existence meters.
  # These are DEPRECATED in the current release and expected to be
  # REMOVED in the next upcoming release.

  # Image
  - name: "image"
    event_type:
      - "image.upload"
      - "image.update"
      - "image.delete"
    type: "gauge"
    unit: 'image'
    volume: 1
    resource_id: $.payload.id
    project_id: $.payload.owner

  - name: "image.upload"
    event_type:
      - "image.upload"
    type: "gauge"
    unit: 'image'
    volume: 1
    resource_id: $.payload.id
    project_id: $.payload.owner

  - name: "image.delete"
    event_type:
      - "image.delete"
    type: "gauge"
    unit: 'image'
    volume: 1
    resource_id: $.payload.id
    project_id: $.payload.owner

  - name: "image.update"
    event_type:
      - "image.update"
    type: "gauge"
    unit: 'image'
    volume: 1
    resource_id: $.payload.id
    project_id: $.payload.owner

  # Orchestration
  - name: 'stack.create'
    event_type:
      - 'orchestration.stack.create.end'
    type: 'delta'
    unit: 'stack'
    volume: 1
    user_id: _context_trustor_user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.stack_identity

  - name: 'stack.update'
    event_type:
      - 'orchestration.stack.update.end'
    type: 'delta'
    unit: 'stack'
    volume: 1
    user_id: _context_trustor_user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.stack_identity

  - name: 'stack.delete'
    event_type:
      - 'orchestration.stack.delete.end'
    type: 'delta'
    unit: 'stack'
    volume: 1
    user_id: _context_trustor_user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.stack_identity

  - name: 'stack.resume'
    event_type:
      - 'orchestration.stack.resume.end'
    type: 'delta'
    unit: 'stack'
    volume: 1
    user_id: _context_trustor_user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.stack_identity

  - name: 'stack.suspend'
    event_type:
      - 'orchestration.stack.suspend.end'
    type: 'delta'
    unit: 'stack'
    volume: 1
    user_id: _context_trustor_user_id
    project_id: $.payload.tenant_id
    resource_id: $.payload.stack_identity

  # MagnetoDB
  - name: 'magnetodb.table.create'
    type: 'gauge'
    unit: 'table'
    volume: 1
    event_type: 'magnetodb.table.create.end'
    resource_id: $.payload.table_uuid
    user_id: _context_user
    project_id: _context_tenant

  - name: 'magnetodb.table.delete'
    type: 'gauge'
    unit: 'table'
    volume: 1
    event_type: 'magnetodb.table.delete.end'
    resource_id: $.payload.table_uuid
    user_id: _context_user
    project_id: _context_tenant

  # Volume
  - name: 'volume'
    type: 'gauge'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.exists'
      - 'volume.create.*'
      - 'volume.delete.*'
      - 'volume.resize.*'
      - 'volume.attach.*'
      - 'volume.detach.*'
      - 'volume.update.*'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.exists'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.exists'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.create.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.create.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.create.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.create.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.delete.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.delete.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.delete.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.delete.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.update.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.update.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id
  - name: 'volume.update.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.update.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.resize.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.resize.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.resize.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.resize.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.attach.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.attach.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.attach.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.attach.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.detach.end'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.detach.end'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'volume.detach.start'
    type: 'delta'
    unit: 'volume'
    volume: 1
    event_type:
      - 'volume.detach.start'
    resource_id: $.payload.volume_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  # Volume Snapshot
  - name: 'snapshot'
    type: 'gauge'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.exists'
      - 'snapshot.create.*'
      - 'snapshot.delete.*'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'snapshot.exists'
    type: 'delta'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.exists'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'snapshot.create.start'
    type: 'delta'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.create.start'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'snapshot.create.end'
    type: 'delta'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.create.end'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'snapshot.delete.start'
    type: 'delta'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.delete.start'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  - name: 'snapshot.delete.end'
    type: 'delta'
    unit: 'snapshot'
    volume: 1
    event_type:
      - 'snapshot.delete.end'
    resource_id: $.payload.snapshot_id
    user_id: $.payload.user_id
    project_id: $.payload.tenant_id

  # Sahara
  - name: 'cluster.create'
    type: 'delta'
    unit: 'cluster'
    volume: 1
    event_type:
      - 'sahara.cluster.create'
    resource_id: $.payload.cluster_id
    project_id: $.payload.project_id

  - name: 'cluster.update'
    type: 'delta'
    unit: 'cluster'
    volume: 1
    event_type:
      - 'sahara.cluster.update'
    resource_id: $.payload.cluster_id
    project_id: $.payload.project_id

  - name: 'cluster.delete'
    type: 'delta'
    unit: 'cluster'
    volume: 1
    event_type:
      - 'sahara.cluster.delete'
    resource_id: $.payload.cluster_id
    project_id: $.payload.project_id

  # Identity
  - name: 'identity.user.created'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.user.created'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.user.updated'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.user.updated'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.user.deleted'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.user.deleted'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.group.created'
    type: 'delta'
    unit: 'group'
    volume: 1
    event_type:
      - 'identity.group.created'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.group.updated'
    type: 'delta'
    unit: 'group'
    volume: 1
    event_type:
      - 'identity.group.updated'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.group.deleted'
    type: 'delta'
    unit: 'group'
    volume: 1
    event_type:
      - 'identity.group.deleted'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.project.created'
    type: 'delta'
    unit: 'project'
    volume: 1
    event_type:
      - 'identity.project.created'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.project.updated'
    type: 'delta'
    unit: 'project'
    volume: 1
    event_type:
      - 'identity.project.updated'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.project.deleted'
    type: 'delta'
    unit: 'project'
    volume: 1
    event_type:
      - 'identity.project.deleted'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.role.created'
    type: 'delta'
    unit: 'role'
    volume: 1
    event_type:
      - 'identity.role.created'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.role.updated'
    type: 'delta'
    unit: 'role'
    volume: 1
    event_type:
      - 'identity.role.updated'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.role.deleted'
    type: 'delta'
    unit: 'role'
    volume: 1
    event_type:
      - 'identity.role.deleted'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.role_assignment.created'
    type: 'delta'
    unit: 'role_assignment'
    volume: 1
    event_type:
      - 'identity.role_assignment.created'
    resource_id: $.payload.role
    user_id: $.payload.initiator.id

  - name: 'identity.role_assignment.deleted'
    type: 'delta'
    unit: 'role_assignment'
    volume: 1
    event_type:
      - 'identity.role_assignment.deleted'
    resource_id: $.payload.role
    user_id: $.payload.initiator.id

  - name: 'identity.authenticate.success'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.authenticate'
    resource_id: $.payload.initiator.id
    user_id: $.payload.initiator.id

  - name: 'identity.authenticate.pending'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.authenticate'
    resource_id: $.payload.initiator.id
    user_id: $.payload.initiator.id

  - name: 'identity.authenticate.failure'
    type: 'delta'
    unit: 'user'
    volume: 1
    event_type:
      - 'identity.authenticate'
    resource_id: $.payload.initiator.id
    user_id: $.payload.initiator.id

  - name: 'identity.trust.created'
    type: 'delta'
    unit: 'trust'
    volume: 1
    event_type:
      - 'identity.OS-TRUST:trust.created'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'identity.trust.deleted'
    type: 'delta'
    unit: 'trust'
    volume: 1
    event_type:
      - 'identity.OS-TRUST:trust.deleted'
    resource_id: $.payload.resource_info
    user_id: $.payload.initiator.id

  - name: 'storage.api.request'
    type: 'delta'
    unit: 'request'
    volume: 1
    event_type:
      - 'objectstore.http.request'
    resource_id: $.payload.target.id
    user_id: $.payload.initiator.id
    project_id: $.payload.initiator.project_id

  - name: '$.payload.name'
    event_type: 'profiler.*'
    type: 'gauge'
    unit: 'trace'
    volume: 1
    user_id: $.payload.user_id
    project_id: $.payload.project_id
    resource_id: '"profiler-" + $.payload.base_id'
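
# ----------------------------------------------------------------------------
# Illustrative note (not part of the shipped file): every entry above follows
# the same schema, enforced by MeterDefinition.REQUIRED_FIELDS in
# ceilometer/meter/notifications.py. A hypothetical entry showing the required
# keys and the most common optional ones:
#
#   - name: 'example.meter'              # required
#     event_type: 'example.event.*'      # required; string or list, wildcards
#     type: 'gauge'                      # required: gauge|delta|cumulative
#     unit: 'GB'                         # required
#     volume: $.payload.size             # required; JSONPath, literal, or a
#                                        # fields/plugin mapping (timedelta)
#     resource_id: $.payload.id          # required
#     user_id: $.payload.user_id         # optional
#     project_id: $.payload.tenant_id    # optional
#     metadata:                          # optional extra JSONPath fields
#       host: $.publisher_id
#     lookup: ['name', 'volume']         # optional; marks fields that fan out
#                                        # into multiple samples per payload
# ----------------------------------------------------------------------------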

# ceilometer-6.1.5/ceilometer/meter/__init__.py (empty)

# ceilometer-6.1.5/ceilometer/compute/util.py

#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
import six

# The options below collect metadata which users define in nova (or
# elsewhere) and store it in the sample for later use according to the
# deployment's requirements, for example as OpenTSDB tags for metrics.
OPTS = [
    cfg.ListOpt('reserved_metadata_namespace',
                default=['metering.'],
                help='List of metadata prefixes reserved for metering use.'),
    cfg.IntOpt('reserved_metadata_length',
               default=256,
               help='Limit on length of reserved metadata values.'),
    cfg.ListOpt('reserved_metadata_keys',
                default=[],
                help='List of metadata keys reserved for metering use. '
                     'These keys are additional to the ones included in '
                     'the namespace.'),
]

cfg.CONF.register_opts(OPTS)


def add_reserved_user_metadata(src_metadata, dest_metadata):
    limit = cfg.CONF.reserved_metadata_length
    user_metadata = {}
    for prefix in cfg.CONF.reserved_metadata_namespace:
        md = dict(
            (k[len(prefix):].replace('.', '_'),
             v[:limit] if isinstance(v, six.string_types) else v)
            for k, v in src_metadata.items()
            if (k.startswith(prefix) and
                k[len(prefix):].replace('.', '_') not in dest_metadata)
        )
        user_metadata.update(md)

    for metadata_key in cfg.CONF.reserved_metadata_keys:
        md = dict(
            (k.replace('.', '_'),
             v[:limit] if isinstance(v, six.string_types) else v)
            for k, v in src_metadata.items()
            if (k == metadata_key and
                k.replace('.', '_') not in dest_metadata)
        )
        user_metadata.update(md)

    if user_metadata:
        dest_metadata['user_metadata'] = user_metadata

    return dest_metadata
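
# ---------------------------------------------------------------------------
# Usage sketch (an addition, assuming the default option values registered
# above): with the default 'metering.' namespace, user metadata set on a nova
# server lands under 'user_metadata' in the sample's resource metadata, with
# dots flattened to underscores. The input values below are hypothetical.

_demo_src = {'metering.server.group': 'prod',  # matches the namespace
             'other.key': 'ignored'}           # does not match
_demo_dest = add_reserved_user_metadata(_demo_src, {'display_name': 'vm-1'})
assert _demo_dest['user_metadata'] == {'server_group': 'prod'}
# ---------------------------------------------------------------------------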

# ceilometer-6.1.5/ceilometer/compute/discovery.py

#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_utils import timeutils

from ceilometer.agent import plugin_base
from ceilometer import nova_client

OPTS = [
    cfg.BoolOpt('workload_partitioning',
                default=False,
                help='Enable work-load partitioning, allowing multiple '
                     'compute agents to be run simultaneously.'),
    cfg.IntOpt('resource_update_interval',
               default=0,
               min=0,
               help="New instances will be discovered periodically based "
                    "on this option (in seconds). By default, "
                    "the agent discovers instances according to the "
                    "pipeline polling interval. If this option is greater "
                    "than 0, the instance list to poll will be updated "
                    "based on this option's interval. Measurements "
                    "relating to the instances will match intervals "
                    "defined in the pipeline.")
]
cfg.CONF.register_opts(OPTS, group='compute')


class InstanceDiscovery(plugin_base.DiscoveryBase):
    def __init__(self):
        super(InstanceDiscovery, self).__init__()
        self.nova_cli = nova_client.Client()
        self.last_run = None
        self.instances = {}
        self.expiration_time = cfg.CONF.compute.resource_update_interval

    def discover(self, manager, param=None):
        """Discover resources to monitor."""
        secs_from_last_update = 0
        if self.last_run:
            secs_from_last_update = timeutils.delta_seconds(
                self.last_run, timeutils.utcnow(True))

        instances = []
        # NOTE(ityaptin) we make a nova request only if this is the first
        # discovery or the resources have expired
        if not self.last_run or secs_from_last_update >= self.expiration_time:
            try:
                utc_now = timeutils.utcnow(True)
                since = self.last_run.isoformat() if self.last_run else None
                instances = self.nova_cli.instance_get_all_by_host(
                    cfg.CONF.host, since)
                self.last_run = utc_now
            except Exception:
                # NOTE(zqfan): instance_get_all_by_host is wrapped and will
                # log the exception when there is any error. There is no
                # need to raise it again or print it one more time.
                return []

        for instance in instances:
            if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted',
                                                                  'error']:
                self.instances.pop(instance.id, None)
            else:
                self.instances[instance.id] = instance

        return self.instances.values()

    @property
    def group_id(self):
        if cfg.CONF.compute.workload_partitioning:
            return cfg.CONF.host
        else:
            return None
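
# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the module above, pure Python): the
# refresh decision made at the top of discover(). The nova query runs on the
# first call and whenever the configured resource_update_interval has
# elapsed; otherwise the cached instance map is served.

def _needs_nova_query(last_run, secs_since_last, expiration_time):
    return last_run is None or secs_since_last >= expiration_time


assert _needs_nova_query(None, 0, 600)          # first run always queries
assert not _needs_nova_query(object(), 599, 600)  # still within the interval
assert _needs_nova_query(object(), 600, 600)    # interval elapsed, re-query
# ---------------------------------------------------------------------------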

# ceilometer-6.1.5/ceilometer/compute/pollsters/util.py

#
# Copyright 2012 eNovance
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import timeutils

from ceilometer.compute import util as compute_util
from ceilometer import sample

INSTANCE_PROPERTIES = [
    # Identity properties
    'reservation_id',
    # Type properties
    'architecture',
    'OS-EXT-AZ:availability_zone',
    'kernel_id',
    'os_type',
    'ramdisk_id',
]


def _get_metadata_from_object(instance):
    """Return a metadata dictionary for the instance."""
    instance_type = instance.flavor['name'] if instance.flavor else None
    metadata = {
        'display_name': instance.name,
        'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''),
        'instance_id': instance.id,
        'instance_type': instance_type,
        'host': instance.hostId,
        'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''),
        'flavor': instance.flavor,
        'status': instance.status.lower(),
        'state': getattr(instance, 'OS-EXT-STS:vm_state', u''),
    }

    # Image properties
    if instance.image:
        metadata['image'] = instance.image
        metadata['image_ref'] = instance.image['id']
        # Images that come through the conductor API in the nova notifier
        # plugin will not have links.
        if instance.image.get('links'):
            metadata['image_ref_url'] = instance.image['links'][0]['href']
        else:
            metadata['image_ref_url'] = None
    else:
        metadata['image'] = None
        metadata['image_ref'] = None
        metadata['image_ref_url'] = None

    for name in INSTANCE_PROPERTIES:
        if hasattr(instance, name):
            metadata[name] = getattr(instance, name)

    metadata['vcpus'] = instance.flavor['vcpus']
    metadata['memory_mb'] = instance.flavor['ram']
    metadata['disk_gb'] = instance.flavor['disk']
    metadata['ephemeral_gb'] = instance.flavor['ephemeral']
    metadata['root_gb'] = (int(metadata['disk_gb']) -
                           int(metadata['ephemeral_gb']))

    return compute_util.add_reserved_user_metadata(instance.metadata,
                                                   metadata)


def make_sample_from_instance(instance, name, type, unit, volume,
                              resource_id=None, additional_metadata=None):
    additional_metadata = additional_metadata or {}
    resource_metadata = _get_metadata_from_object(instance)
    resource_metadata.update(additional_metadata)
    return sample.Sample(
        name=name,
        type=type,
        unit=unit,
        volume=volume,
        user_id=instance.user_id,
        project_id=instance.tenant_id,
        resource_id=resource_id or instance.id,
        timestamp=timeutils.utcnow().isoformat(),
        resource_metadata=resource_metadata,
    )


def instance_name(instance):
    """Shortcut to get instance name."""
    return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)
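
# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the module above): the root disk size is
# derived rather than reported, because the flavor carries total and
# ephemeral disk but not root disk directly. Hypothetical flavor values:

_demo_flavor = {'disk': 40, 'ephemeral': 10}  # GB
_demo_root_gb = int(_demo_flavor['disk']) - int(_demo_flavor['ephemeral'])
assert _demo_root_gb == 30  # this is the 'root_gb' metadata value
# ---------------------------------------------------------------------------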
from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) class CPUPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): for instance in resources: LOG.debug('checking instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpus(instance) LOG.debug("CPUTIME USAGE: %(instance)s %(time)d", {'instance': instance, 'time': cpu_info.time}) cpu_num = {'cpu_number': cpu_info.number} yield util.make_sample_from_instance( instance, name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume=cpu_info.time, additional_metadata=cpu_num, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining CPU time is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('could not get CPU time for %(id)s: %(e)s'), {'id': instance.id, 'e': err}) class CPUUtilPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking CPU util for instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpu_util( instance, self._inspection_duration) LOG.debug("CPU UTIL: %(instance)s %(util)d", {'instance': instance, 'util': cpu_info.util}) yield util.make_sample_from_instance( instance, name='cpu_util', type=sample.TYPE_GAUGE, unit='%', volume=cpu_info.util, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining CPU Util is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'), {'id': instance.id, 'e': err}) ceilometer-6.1.5/ceilometer/compute/pollsters/net.py0000664000567000056710000001546013072744705024026 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
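# Editorial illustration (not part of the upstream tree): how the cumulative
# 'cpu' meter emitted by CPUPollster above relates to a utilisation
# percentage such as 'cpu_util'. This mirrors the usual rate-of-change
# conversion applied downstream; the function name and the figures in the
# usage note are assumptions for this sketch.
def cpu_util_from_cumulative(prev_ns, cur_ns, wall_seconds, cpu_number):
    """Derive CPU utilisation (%) from two cumulative cpu-time readings."""
    delta_ns = cur_ns - prev_ns
    # cpu time is reported in ns and wall time in s, hence the 10**9
    # factor; dividing by cpu_number normalises multi-vCPU guests to 0-100%.
    return 100.0 * delta_ns / (wall_seconds * 10.0 ** 9 * cpu_number)

# cpu_util_from_cumulative(0, 5 * 10 ** 9, 10.0, 1) -> 50.0: a single-vCPU
# guest that accumulated 5s of CPU time over a 10s window was 50% busy.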
import copy from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) class _Base(pollsters.BaseComputePollster): NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d", "write-bytes=%d"]) @staticmethod def make_vnic_sample(instance, name, type, unit, volume, vnic_data): metadata = copy.copy(vnic_data) additional_metadata = dict(zip(metadata._fields, metadata)) if vnic_data.fref is not None: rid = vnic_data.fref additional_metadata['vnic_name'] = vnic_data.fref else: instance_name = util.instance_name(instance) rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name) additional_metadata['vnic_name'] = vnic_data.name return util.make_sample_from_instance( instance=instance, name=name, type=type, unit=unit, volume=volume, resource_id=rid, additional_metadata=additional_metadata ) CACHE_KEY_VNIC = 'vnics' def _get_vnic_info(self, inspector, instance): return inspector.inspect_vnics(instance) @staticmethod def _get_rx_info(info): return info.rx_bytes @staticmethod def _get_tx_info(info): return info.tx_bytes def _get_vnics_for_instance(self, cache, inspector, instance): i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {}) if instance.id not in i_cache: i_cache[instance.id] = list( self._get_vnic_info(inspector, instance) ) return i_cache[instance.id] def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: instance_name = util.instance_name(instance) LOG.debug('checking net info for instance %s', instance.id) try: vnics = self._get_vnics_for_instance( cache, self.inspector, instance, ) for vnic, info in vnics: LOG.debug(self.NET_USAGE_MESSAGE, instance_name, vnic.name, self._get_rx_info(info), self._get_tx_info(info)) yield self._get_sample(instance, vnic, info) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class _RateBase(_Base): NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:", "read-bytes-rate=%d", "write-bytes-rate=%d"]) CACHE_KEY_VNIC = 'vnic-rates' def _get_vnic_info(self, inspector, instance): return inspector.inspect_vnic_rates(instance, self._inspection_duration) @staticmethod def _get_rx_info(info): return info.rx_bytes_rate @staticmethod def _get_tx_info(info): return info.tx_bytes_rate class IncomingBytesPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=info.rx_bytes, vnic_data=vnic, ) class IncomingPacketsPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.packets', type=sample.TYPE_CUMULATIVE, unit='packet', volume=info.rx_packets, vnic_data=vnic, ) class OutgoingBytesPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=info.tx_bytes, vnic_data=vnic, ) class OutgoingPacketsPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.packets', type=sample.TYPE_CUMULATIVE, unit='packet', volume=info.tx_packets, vnic_data=vnic, ) class IncomingBytesRatePollster(_RateBase): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.bytes.rate', type=sample.TYPE_GAUGE, unit='B/s', volume=info.rx_bytes_rate, vnic_data=vnic, ) class OutgoingBytesRatePollster(_RateBase): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.bytes.rate', type=sample.TYPE_GAUGE, unit='B/s', volume=info.tx_bytes_rate, vnic_data=vnic, ) ceilometer-6.1.5/ceilometer/compute/pollsters/memory.py0000664000567000056710000001257413072744705024553 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
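# Editorial illustration (not part of the upstream tree): the per-vNIC
# resource-id convention used by _Base.make_vnic_sample() in net.py above,
# isolated for clarity. The helper name is an assumption for this sketch.
def vnic_resource_id(instance_name, instance_id, vnic_name, fref=None):
    """Mirror the resource-id logic of make_vnic_sample()."""
    if fref is not None:
        # Hypervisors exposing a filter ref (e.g. libvirt nwfilter) get a
        # stable id that does not embed the instance name.
        return fref
    return "%s-%s-%s" % (instance_name, instance_id, vnic_name)

# vnic_resource_id('vm-0', '6ec6bc1d', 'tap0') -> 'vm-0-6ec6bc1d-tap0'
# vnic_resource_id('vm-0', '6ec6bc1d', 'tap0', fref='nova-filter-ref')
#     -> 'nova-filter-ref'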
from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _, _LE, _LW from ceilometer import sample LOG = log.getLogger(__name__) class MemoryUsagePollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking memory usage for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_usage( instance, self._inspection_duration) LOG.debug("MEMORY USAGE: %(instance)s %(usage)f", {'instance': instance, 'usage': memory_info.usage}) yield util.make_sample_from_instance( instance, name='memory.usage', type=sample.TYPE_GAUGE, unit='MB', volume=memory_info.usage, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s'), {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining Memory Usage is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, 'e': err}) class MemoryResidentPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking resident memory for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_resident( instance, self._inspection_duration) LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f", {'instance': instance, 'resident': memory_info.resident}) yield util.make_sample_from_instance( instance, name='memory.resident', type=sample.TYPE_GAUGE, unit='MB', volume=memory_info.resident, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s'), {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('Obtaining Resident Memory is not implemented' ' for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_LE('Could not get Resident Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, 'e': err}) ceilometer-6.1.5/ceilometer/compute/pollsters/disk.py0000664000567000056710000006422013072744705024170 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections from oslo_log import log import six import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) DiskIOData = collections.namedtuple( 'DiskIOData', 'r_bytes r_requests w_bytes w_requests per_disk_requests', ) DiskRateData = collections.namedtuple('DiskRateData', ['read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate', 'per_disk_rate']) DiskLatencyData = collections.namedtuple('DiskLatencyData', ['disk_latency', 'per_disk_latency']) DiskIOPSData = collections.namedtuple('DiskIOPSData', ['iops_count', 'per_disk_iops']) DiskInfoData = collections.namedtuple('DiskInfoData', ['capacity', 'allocation', 'physical', 'per_disk_info']) @six.add_metaclass(abc.ABCMeta) class _Base(pollsters.BaseComputePollster): DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:", "%s %s:", "read-requests=%d", "read-bytes=%d", "write-requests=%d", "write-bytes=%d", "errors=%d", ]) CACHE_KEY_DISK = 'diskio' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK, {}) if instance.id not in i_cache: r_bytes = 0 r_requests = 0 w_bytes = 0 w_requests = 0 per_device_read_bytes = {} per_device_read_requests = {} per_device_write_bytes = {} per_device_write_requests = {} for disk, info in inspector.inspect_disks(instance): LOG.debug(self.DISKIO_USAGE_MESSAGE, instance, disk.device, info.read_requests, info.read_bytes, info.write_requests, info.write_bytes, info.errors) r_bytes += info.read_bytes r_requests += info.read_requests w_bytes += info.write_bytes w_requests += info.write_requests # per disk data per_device_read_bytes[disk.device] = info.read_bytes per_device_read_requests[disk.device] = info.read_requests per_device_write_bytes[disk.device] = info.write_bytes per_device_write_requests[disk.device] = info.write_requests per_device_requests = { 'read_bytes': per_device_read_bytes, 'read_requests': per_device_read_requests, 'write_bytes': per_device_write_bytes, 'write_requests': per_device_write_requests, } i_cache[instance.id] = DiskIOData( r_bytes=r_bytes, r_requests=r_requests, w_bytes=w_bytes, w_requests=w_requests, per_disk_requests=per_device_requests, ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(instance, c_data): """Return one or more Sample.""" @staticmethod def _get_sample_read_and_write(instance, 
_name, _unit, c_data, _volume, _metadata): """Read / write Pollster and return one Sample""" return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_CUMULATIVE, unit=_unit, volume=getattr(c_data, _volume), additional_metadata={ 'device': c_data.per_disk_requests[_metadata].keys()}, )] @staticmethod def _get_samples_per_device(c_data, _attr, instance, _name, _unit): """Return one or more Samples for meter 'disk.device.*'""" samples = [] for disk, value in six.iteritems(c_data.per_disk_requests[_attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_CUMULATIVE, unit=_unit, volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def get_samples(self, manager, cache, resources): for instance in resources: instance_name = util.instance_name(instance) try: c_data = self._populate_cache( self.inspector, cache, instance, ) for s in self._get_samples(instance, c_data): yield s except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class ReadRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.read.requests', 'request', c_data, 'r_requests', 'read_requests') class PerDeviceReadRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'read_requests', instance, 'disk.device.read.requests', 'request') class ReadBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.read.bytes', 'B', c_data, 'r_bytes', 'read_bytes') class PerDeviceReadBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'read_bytes', instance, 'disk.device.read.bytes', 'B') class WriteRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.write.requests', 'request', c_data, 'w_requests', 'write_requests') class PerDeviceWriteRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'write_requests', instance, 'disk.device.write.requests', 'request') class WriteBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.write.bytes', 'B', c_data, 'w_bytes', 'write_bytes') class PerDeviceWriteBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'write_bytes', instance, 'disk.device.write.bytes', 'B') @six.add_metaclass(abc.ABCMeta) class _DiskRatesPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_RATE = 'diskio-rate' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {}) if 
instance.id not in i_cache: r_bytes_rate = 0 r_requests_rate = 0 w_bytes_rate = 0 w_requests_rate = 0 per_disk_r_bytes_rate = {} per_disk_r_requests_rate = {} per_disk_w_bytes_rate = {} per_disk_w_requests_rate = {} disk_rates = inspector.inspect_disk_rates( instance, self._inspection_duration) for disk, info in disk_rates: r_bytes_rate += info.read_bytes_rate r_requests_rate += info.read_requests_rate w_bytes_rate += info.write_bytes_rate w_requests_rate += info.write_requests_rate per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate per_disk_r_requests_rate[disk.device] = info.read_requests_rate per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate per_disk_w_requests_rate[disk.device] = ( info.write_requests_rate) per_disk_rate = { 'read_bytes_rate': per_disk_r_bytes_rate, 'read_requests_rate': per_disk_r_requests_rate, 'write_bytes_rate': per_disk_w_bytes_rate, 'write_requests_rate': per_disk_w_requests_rate, } i_cache[instance.id] = DiskRateData( r_bytes_rate, r_requests_rate, w_bytes_rate, w_requests_rate, per_disk_rate ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: try: disk_rates_info = self._populate_cache( self.inspector, cache, instance, ) for disk_rate in self._get_samples(instance, disk_rates_info): yield disk_rate except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) def _get_samples_per_device(self, disk_rates_info, _attr, instance, _name, _unit): """Return one or more Samples for meter 'disk.device.*'.""" samples = [] for disk, value in six.iteritems(disk_rates_info.per_disk_rate[ _attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit=_unit, volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def _get_sample_read_and_write(self, instance, _name, _unit, _element, _attr1, _attr2): """Read / write Pollster and return one Sample""" return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit=_unit, volume=getattr(_element, _attr1), additional_metadata={ 'device': getattr(_element, _attr2)[_attr1].keys()}, )] class ReadBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.read.bytes.rate', 'B/s', disk_rates_info, 'read_bytes_rate', 'per_disk_rate') class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'read_bytes_rate', instance, 'disk.device.read.bytes.rate', 'B/s') class ReadRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.read.requests.rate', 'requests/s', disk_rates_info, 
'read_requests_rate', 'per_disk_rate') class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'read_requests_rate', instance, 'disk.device.read.requests.rate', 'requests/s') class WriteBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.write.bytes.rate', 'B/s', disk_rates_info, 'write_bytes_rate', 'per_disk_rate') class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'write_bytes_rate', instance, 'disk.device.write.bytes.rate', 'B/s') class WriteRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.write.requests.rate', 'requests/s', disk_rates_info, 'write_requests_rate', 'per_disk_rate') class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'write_requests_rate', instance, 'disk.device.write.requests.rate', 'requests/s') @six.add_metaclass(abc.ABCMeta) class _DiskLatencyPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_LATENCY = 'disk-latency' def _populate_cache(self, inspector, cache, instance): return self._populate_cache_create( cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {}), instance, inspector, DiskLatencyData, 'inspect_disk_latency', 'disk_latency') @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): for instance in resources: try: disk_latency_info = self._populate_cache( self.inspector, cache, instance, ) for disk_latency in self._get_samples(instance, disk_latency_info): yield disk_latency except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class DiskLatencyPollster(_DiskLatencyPollsterBase): def _get_samples(self, instance, disk_latency_info): return [util.make_sample_from_instance( instance, name='disk.latency', type=sample.TYPE_GAUGE, unit='ms', volume=disk_latency_info.disk_latency / 1000 )] class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase): def _get_samples(self, instance, disk_latency_info): samples = [] for disk, value in six.iteritems(disk_latency_info.per_disk_latency[ 'disk_latency']): samples.append(util.make_sample_from_instance( instance, name='disk.device.latency', type=sample.TYPE_GAUGE, unit='ms', volume=value / 1000, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples class _DiskIOPSPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_IOPS = 'disk-iops' def _populate_cache(self, inspector, cache, instance): return self._populate_cache_create( cache.setdefault(self.CACHE_KEY_DISK_IOPS, {}), instance, inspector, DiskIOPSData, 'inspect_disk_iops', 'iops_count') @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): for instance in resources: try: disk_iops_info = self._populate_cache( self.inspector, cache, instance, ) for disk_iops in self._get_samples(instance, disk_iops_info): yield disk_iops except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' '%(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class DiskIOPSPollster(_DiskIOPSPollsterBase): def _get_samples(self, instance, disk_iops_info): return [util.make_sample_from_instance( instance, name='disk.iops', type=sample.TYPE_GAUGE, unit='count/s', volume=disk_iops_info.iops_count )] class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase): def _get_samples(self, instance, disk_iops_info): samples = [] for disk, value in six.iteritems(disk_iops_info.per_disk_iops[ 'iops_count']): samples.append(util.make_sample_from_instance( instance, name='disk.device.iops', type=sample.TYPE_GAUGE, unit='count/s', volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples @six.add_metaclass(abc.ABCMeta) class _DiskInfoPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_INFO = 'diskinfo' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {}) if instance.id not in i_cache: all_capacity = 0 all_allocation = 0 all_physical = 0 per_disk_capacity = {} per_disk_allocation = {} per_disk_physical = {} disk_info = inspector.inspect_disk_info( instance) for disk, info in disk_info: all_capacity += info.capacity all_allocation += info.allocation all_physical += info.physical per_disk_capacity[disk.device] = info.capacity per_disk_allocation[disk.device] = info.allocation per_disk_physical[disk.device] = info.physical per_disk_info = { 'capacity': per_disk_capacity, 'allocation': per_disk_allocation, 'physical': per_disk_physical, } i_cache[instance.id] = DiskInfoData( all_capacity, all_allocation, all_physical, per_disk_info ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(self, instance, disk_info): """Return one or more Sample.""" def _get_samples_per_device(self, disk_info, _attr, instance, _name): """Return one or more Samples for meter 'disk.device.*'.""" samples = [] for disk, value in six.iteritems(disk_info.per_disk_info[_attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit='B', volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def _get_samples_task(self, instance, _name, disk_info, _attr1, _attr2): """Return one or more Samples for meter 'disk.task.*'.""" return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit='B', volume=getattr(disk_info, _attr1), additional_metadata={ 'device': disk_info.per_disk_info[_attr2].keys()}, )] def get_samples(self, manager, cache, resources): for instance in resources: try: disk_size_info = self._populate_cache( self.inspector, cache, instance, ) for disk_info in self._get_samples(instance, disk_size_info): yield disk_info except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s ' '(%(instance_id)s) : %(error)s') % ( {'name': instance_name, 'instance_id': instance.id, 'error': err})) class CapacityPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.capacity', disk_info, 'capacity', 'capacity') class PerDeviceCapacityPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'capacity', instance, 'disk.device.capacity') class AllocationPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.allocation', disk_info, 'allocation', 'allocation') class PerDeviceAllocationPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'allocation', instance, 'disk.device.allocation') class PhysicalPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.usage', disk_info, 'physical', 'physical') class PerDevicePhysicalPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'physical', instance, 'disk.device.usage') ceilometer-6.1.5/ceilometer/compute/pollsters/instance.py0000664000567000056710000000215113072744705025035 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer import sample class InstancePollster(pollsters.BaseComputePollster): @staticmethod def get_samples(manager, cache, resources): for instance in resources: yield util.make_sample_from_instance( instance, name='instance', type=sample.TYPE_GAUGE, unit='instance', volume=1, ) ceilometer-6.1.5/ceilometer/compute/pollsters/__init__.py0000664000567000056710000000464713072744705025004 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
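# Editorial illustration (not part of the upstream tree): the two
# resource-id shapes produced by the disk pollsters in disk.py above.
# Aggregate meters ('disk.read.bytes', 'disk.capacity', ...) use the
# instance id directly, while per-device meters ('disk.device.read.bytes',
# ...) append the device name. The helper name is an assumption for this
# sketch.
def disk_resource_id(instance_id, device=None):
    """Mirror the resource-id convention of the per-device disk pollsters."""
    if device is None:
        return instance_id
    return "%s-%s" % (instance_id, device)

# disk_resource_id('6ec6bc1d')        -> '6ec6bc1d'      (disk.read.bytes)
# disk_resource_id('6ec6bc1d', 'vda') -> '6ec6bc1d-vda'  (disk.device.read.bytes)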
import abc from oslo_utils import timeutils import six from ceilometer.agent import plugin_base from ceilometer.compute.virt import inspector as virt_inspector @six.add_metaclass(abc.ABCMeta) class BaseComputePollster(plugin_base.PollsterBase): @property def inspector(self): try: inspector = self._inspector except AttributeError: inspector = virt_inspector.get_hypervisor_inspector() BaseComputePollster._inspector = inspector return inspector @property def default_discovery(self): return 'local_instances' @staticmethod def _populate_cache_create(_i_cache, _instance, _inspector, _DiskData, _inspector_attr, _stats_attr): """Settings and return cache.""" if _instance.id not in _i_cache: _data = 0 _per_device_data = {} disk_rates = getattr(_inspector, _inspector_attr)(_instance) for disk, stats in disk_rates: _data += getattr(stats, _stats_attr) _per_device_data[disk.device] = ( getattr(stats, _stats_attr)) _per_disk_data = { _stats_attr: _per_device_data } _i_cache[_instance.id] = _DiskData( _data, _per_disk_data ) return _i_cache[_instance.id] def _record_poll_time(self): """Method records current time as the poll time. :return: time in seconds since the last poll time was recorded """ current_time = timeutils.utcnow() duration = None if hasattr(self, '_last_poll_time'): duration = timeutils.delta_seconds(self._last_poll_time, current_time) self._last_poll_time = current_time return duration ceilometer-6.1.5/ceilometer/compute/virt/0000775000567000056710000000000013072745164021615 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/inspector.py0000664000567000056710000002246213072744705024203 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hypervisors.""" import collections from oslo_config import cfg from oslo_log import log from stevedore import driver import ceilometer from ceilometer.i18n import _ OPTS = [ cfg.StrOpt('hypervisor_inspector', default='libvirt', help='Inspector to use for inspecting the hypervisor layer. ' 'Known inspectors are libvirt, hyperv, vmware, xenapi ' 'and powervm.'), ] cfg.CONF.register_opts(OPTS) LOG = log.getLogger(__name__) # Named tuple representing instances. # # name: the name of the instance # uuid: the UUID associated with the instance # Instance = collections.namedtuple('Instance', ['name', 'UUID']) # Named tuple representing CPU statistics. # # number: number of CPUs # time: cumulative CPU time # CPUStats = collections.namedtuple('CPUStats', ['number', 'time']) # Named tuple representing CPU Utilization statistics. # # util: CPU utilization in percentage # CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util']) # Named tuple representing Memory usage statistics. # # usage: Amount of memory used # MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage']) # Named tuple representing Resident Memory usage statistics. 
# # resident: Amount of resident memory # MemoryResidentStats = collections.namedtuple('MemoryResidentStats', ['resident']) # Named tuple representing vNICs. # # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # Interface = collections.namedtuple('Interface', ['name', 'mac', 'fref', 'parameters']) # Named tuple representing vNIC statistics. # # rx_bytes: number of received bytes # rx_packets: number of received packets # tx_bytes: number of transmitted bytes # tx_packets: number of transmitted packets # InterfaceStats = collections.namedtuple('InterfaceStats', ['rx_bytes', 'rx_packets', 'tx_bytes', 'tx_packets']) # Named tuple representing vNIC rate statistics. # # rx_bytes_rate: rate of received bytes # tx_bytes_rate: rate of transmitted bytes # InterfaceRateStats = collections.namedtuple('InterfaceRateStats', ['rx_bytes_rate', 'tx_bytes_rate']) # Named tuple representing disks. # # device: the device name for the disk # Disk = collections.namedtuple('Disk', ['device']) # Named tuple representing disk statistics. # # read_bytes: number of bytes read # read_requests: number of read operations # write_bytes: number of bytes written # write_requests: number of write operations # errors: number of errors # DiskStats = collections.namedtuple('DiskStats', ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors']) # Named tuple representing disk rate statistics. # # read_bytes_rate: number of bytes read per second # read_requests_rate: number of read operations per second # write_bytes_rate: number of bytes written per second # write_requests_rate: number of write operations per second # DiskRateStats = collections.namedtuple('DiskRateStats', ['read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate']) # Named tuple representing disk latency statistics. # # disk_latency: average disk latency # DiskLatencyStats = collections.namedtuple('DiskLatencyStats', ['disk_latency']) # Named tuple representing disk iops statistics. # # iops: number of iops per second # DiskIOPSStats = collections.namedtuple('DiskIOPSStats', ['iops_count']) # Named tuple representing disk Information. # # capacity: capacity of the disk # allocation: allocation of the disk # physical: usage of the disk DiskInfo = collections.namedtuple('DiskInfo', ['capacity', 'allocation', 'physical']) # Exception types # class InspectorException(Exception): def __init__(self, message=None): super(InspectorException, self).__init__(message) class InstanceNotFoundException(InspectorException): pass class InstanceShutOffException(InspectorException): pass class NoDataException(InspectorException): pass # Main virt inspector abstraction layering over the hypervisor API. # class Inspector(object): def inspect_cpus(self, instance): """Inspect the CPU statistics for an instance. :param instance: the target instance :return: the number of CPUs and cumulative CPU time """ raise ceilometer.NotImplementedError def inspect_cpu_util(self, instance, duration=None): """Inspect the CPU Utilization (%) for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the percentage of CPU utilization """ raise ceilometer.NotImplementedError def inspect_vnics(self, instance): """Inspect the vNIC statistics for an instance. 
:param instance: the target instance :return: for each vNIC, the number of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_vnic_rates(self, instance, duration=None): """Inspect the vNIC rate statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the rate of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_disks(self, instance): """Inspect the disk statistics for an instance. :param instance: the target instance :return: for each disk, the number of bytes & operations read and written, and the error count """ raise ceilometer.NotImplementedError def inspect_memory_usage(self, instance, duration=None): """Inspect the memory usage statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the amount of memory used """ raise ceilometer.NotImplementedError def inspect_memory_resident(self, instance, duration=None): """Inspect the resident memory statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the amount of resident memory """ raise ceilometer.NotImplementedError def inspect_disk_rates(self, instance, duration=None): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written per second, with the error count """ raise ceilometer.NotImplementedError def inspect_disk_latency(self, instance): """Inspect the disk latency statistics for an instance. :param instance: the target instance :return: for each disk, the average disk latency """ raise ceilometer.NotImplementedError def inspect_disk_iops(self, instance): """Inspect the disk IOPS statistics for an instance. :param instance: the target instance :return: for each disk, the number of IOPS per second """ raise ceilometer.NotImplementedError def inspect_disk_info(self, instance): """Inspect the disk information for an instance. :param instance: the target instance :return: for each disk, its capacity, allocation and usage """ raise ceilometer.NotImplementedError def get_hypervisor_inspector(): try: namespace = 'ceilometer.compute.virt' mgr = driver.DriverManager(namespace, cfg.CONF.hypervisor_inspector, invoke_on_load=True) return mgr.driver except ImportError as e: LOG.error(_("Unable to load the hypervisor inspector: %s") % e) return Inspector() ceilometer-6.1.5/ceilometer/compute/virt/libvirt/0000775000567000056710000000000013072745164023270 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/libvirt/inspector.py0000664000567000056710000002176213072744705025660 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for libvirt.""" from lxml import etree from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ libvirt = None LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('libvirt_type', default='kvm', choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'], help='Libvirt domain type.'), cfg.StrOpt('libvirt_uri', default='', help='Override the default libvirt URI ' '(which is dependent on libvirt_type).'), ] CONF = cfg.CONF CONF.register_opts(OPTS) def retry_on_disconnect(function): def decorator(self, *args, **kwargs): try: return function(self, *args, **kwargs) except libvirt.libvirtError as e: if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): LOG.debug('Connection to libvirt broken') self.connection = None return function(self, *args, **kwargs) else: raise return decorator class LibvirtInspector(virt_inspector.Inspector): per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///') def __init__(self): self.uri = self._get_uri() self.connection = None def _get_uri(self): return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type, 'qemu:///system') def _get_connection(self): if not self.connection: global libvirt if libvirt is None: libvirt = __import__('libvirt') LOG.debug('Connecting to libvirt: %s', self.uri) self.connection = libvirt.openReadOnly(self.uri) return self.connection @retry_on_disconnect def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) try: return self._get_connection().lookupByUUIDString(instance.id) except Exception as ex: if not libvirt or not isinstance(ex, libvirt.libvirtError): raise virt_inspector.InspectorException(six.text_type(ex)) error_code = ex.get_error_code() if (error_code in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): raise msg = _("Error from libvirt while looking up instance " "<name=%(name)s, id=%(id)s>: " "[Error Code %(error_code)s] " "%(ex)s") % {'name': instance_name, 'id': instance.id, 'error_code': error_code, 'ex': ex} raise virt_inspector.InstanceNotFoundException(msg) def inspect_cpus(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) dom_info = domain.info() return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4]) def _get_domain_not_shut_off_or_raise(self, instance): instance_name = util.instance_name(instance) domain = self._lookup_by_uuid(instance) state = domain.info()[0] if state == libvirt.VIR_DOMAIN_SHUTOFF: msg = _('Failed to inspect data of instance ' '<name=%(name)s, id=%(id)s>, ' 'domain state is SHUTOFF.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.InstanceShutOffException(msg) return domain def inspect_vnics(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for iface in tree.findall('devices/interface'): target = iface.find('target') if target is not None: name = target.get('dev') else: continue mac = iface.find('mac') if mac is not None: mac_address = mac.get('address') else: continue fref = iface.find('filterref') if fref is not None: fref = fref.get('filter') params =
dict((p.get('name').lower(), p.get('value')) for p in iface.findall('filterref/parameter')) interface = virt_inspector.Interface(name=name, mac=mac_address, fref=fref, parameters=params) dom_stats = domain.interfaceStats(name) stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0], rx_packets=dom_stats[1], tx_bytes=dom_stats[4], tx_packets=dom_stats[5]) yield (interface, stats) def inspect_disks(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for device in filter( bool, [target.get("dev") for target in tree.findall('devices/disk/target')]): disk = virt_inspector.Disk(device=device) block_stats = domain.blockStats(device) stats = virt_inspector.DiskStats(read_requests=block_stats[0], read_bytes=block_stats[1], write_requests=block_stats[2], write_bytes=block_stats[3], errors=block_stats[4]) yield (disk, stats) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) domain = self._get_domain_not_shut_off_or_raise(instance) try: memory_stats = domain.memoryStats() if (memory_stats and memory_stats.get('available') and memory_stats.get('unused')): memory_used = (memory_stats.get('available') - memory_stats.get('unused')) # Stat provided from libvirt is in KB, converting it to MB. memory_used = memory_used / units.Ki return virt_inspector.MemoryUsageStats(usage=memory_used) else: msg = _('Failed to inspect memory usage of instance ' '<name=%(name)s, id=%(id)s>, ' 'can not get info from libvirt.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.NoDataException(msg) # memoryStats may raise an exception if the method is not supported # by the underlying hypervisor being used by libvirt. except libvirt.libvirtError as e: msg = _('Failed to inspect memory usage of %(instance_uuid)s, ' 'can not get info from libvirt: %(error)s') % { 'instance_uuid': instance.id, 'error': e} raise virt_inspector.NoDataException(msg) def inspect_disk_info(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for device in filter( bool, [target.get("dev") for target in tree.findall('devices/disk/target')]): disk = virt_inspector.Disk(device=device) block_info = domain.blockInfo(device) info = virt_inspector.DiskInfo(capacity=block_info[0], allocation=block_info[1], physical=block_info[2]) yield (disk, info) def inspect_memory_resident(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) memory = domain.memoryStats()['rss'] / units.Ki return virt_inspector.MemoryResidentStats(resident=memory) ceilometer-6.1.5/ceilometer/compute/virt/libvirt/__init__.py0000664000567000056710000000000013072744703025365 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/xenapi/0000775000567000056710000000000013072745164023101 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/xenapi/inspector.py0000664000567000056710000001736413072744705025474 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for XenAPI.""" from oslo_config import cfg from oslo_utils import units import six.moves.urllib.parse as urlparse try: import XenAPI as api except ImportError: api = None from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ opt_group = cfg.OptGroup(name='xenapi', title='Options for XenAPI') OPTS = [ cfg.StrOpt('connection_url', help='URL for connection to XenServer/Xen Cloud Platform.'), cfg.StrOpt('connection_username', default='root', help='Username for connection to XenServer/Xen Cloud ' 'Platform.'), cfg.StrOpt('connection_password', help='Password for connection to XenServer/Xen Cloud Platform.', secret=True), ] CONF = cfg.CONF CONF.register_group(opt_group) CONF.register_opts(OPTS, group=opt_group) class XenapiException(virt_inspector.InspectorException): pass def swap_xapi_host(url, host_addr): """Replace the XenServer address present in 'url' with 'host_addr'.""" temp_url = urlparse.urlparse(url) # The connection URL is served by XAPI and doesn't support having a # path for the connection url after the port. And username/password # will be pass separately. So the URL like "http://abc:abc@abc:433/abc" # should not appear for XAPI case. temp_netloc = temp_url.netloc.replace(temp_url.hostname, '%s' % host_addr) replaced = temp_url._replace(netloc=temp_netloc) return urlparse.urlunparse(replaced) def get_api_session(): if not api: raise ImportError(_('XenAPI not installed')) url = CONF.xenapi.connection_url username = CONF.xenapi.connection_username password = CONF.xenapi.connection_password if not url or password is None: raise XenapiException(_('Must specify connection_url, and ' 'connection_password to use')) try: session = (api.xapi_local() if url == 'unix://local' else api.Session(url)) session.login_with_password(username, password) except api.Failure as e: if e.details[0] == 'HOST_IS_SLAVE': master = e.details[1] url = swap_xapi_host(url, master) try: session = api.Session(url) session.login_with_password(username, password) except api.Failure as es: raise XenapiException(_('Could not connect slave host: %s ') % es.details[0]) else: msg = _("Could not connect to XenAPI: %s") % e.details[0] raise XenapiException(msg) return session class XenapiInspector(virt_inspector.Inspector): def __init__(self): super(XenapiInspector, self).__init__() self.session = get_api_session() def _get_host_ref(self): """Return the xenapi host on which nova-compute runs on.""" return self.session.xenapi.session.get_this_host(self.session.handle) def _call_xenapi(self, method, *args): return self.session.xenapi_request(method, args) def _lookup_by_name(self, instance_name): vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name) n = len(vm_refs) if n == 0: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in XenServer') % instance_name) elif n > 1: raise XenapiException( _('Multiple VM %s found in XenServer') % instance_name) else: return vm_refs[0] def inspect_cpu_util(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) metrics_rec = self._call_xenapi("VM_metrics.get_record", metrics_ref) vcpus_number = metrics_rec['VCPUs_number'] vcpus_utils = metrics_rec['VCPUs_utilisation'] if len(vcpus_utils) == 0: msg = _("Could not 
get VM %s CPU Utilization") % instance_name raise XenapiException(msg) utils = 0.0 for num in range(int(vcpus_number)): utils += vcpus_utils.get(str(num)) utils = utils / int(vcpus_number) * 100 return virt_inspector.CPUUtilStats(util=utils) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) metrics_rec = self._call_xenapi("VM_metrics.get_record", metrics_ref) # Stat provided from XenServer is in B, converting it to MB. memory = int(metrics_rec['memory_actual']) / units.Mi return virt_inspector.MemoryUsageStats(usage=memory) def inspect_vnic_rates(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref) if vif_refs: for vif_ref in vif_refs: vif_rec = self._call_xenapi("VIF.get_record", vif_ref) vif_metrics_ref = self._call_xenapi( "VIF.get_metrics", vif_ref) vif_metrics_rec = self._call_xenapi( "VIF_metrics.get_record", vif_metrics_ref) interface = virt_inspector.Interface( name=vif_rec['uuid'], mac=vif_rec['MAC'], fref=None, parameters=None) rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate) yield (interface, stats) def inspect_disk_rates(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref) if vbd_refs: for vbd_ref in vbd_refs: vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref) vbd_metrics_ref = self._call_xenapi("VBD.get_metrics", vbd_ref) vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record", vbd_metrics_ref) disk = virt_inspector.Disk(device=vbd_rec['device']) # Stats provided from XenServer are in KB/s, # converting it to B/s. read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki disk_rate_info = virt_inspector.DiskRateStats( read_bytes_rate=read_rate, read_requests_rate=0, write_bytes_rate=write_rate, write_requests_rate=0) yield(disk, disk_rate_info) ceilometer-6.1.5/ceilometer/compute/virt/xenapi/__init__.py0000664000567000056710000000000013072744703025176 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/vmware/0000775000567000056710000000000013072745164023116 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/vmware/inspector.py0000664000567000056710000002002013072744705025470 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
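# Editorial illustration (not part of the upstream tree): the master
# fail-over rewrite performed by swap_xapi_host() in the XenAPI inspector
# above, re-implemented standalone so the behaviour is easy to verify.
import six.moves.urllib.parse as urlparse


def swap_host_demo(url, host_addr):
    """Replace the host in 'url' with 'host_addr', keeping scheme and port."""
    parsed = urlparse.urlparse(url)
    netloc = parsed.netloc.replace(parsed.hostname, host_addr)
    return urlparse.urlunparse(parsed._replace(netloc=netloc))

# swap_host_demo('http://10.0.0.5:443', '10.0.0.9') -> 'http://10.0.0.9:443'
# This is how get_api_session() retries against the pool master after a
# HOST_IS_SLAVE failure: only the host part of the configured URL changes.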
"""Implementation of Inspector abstraction for VMware vSphere""" from oslo_config import cfg from oslo_utils import units from oslo_vmware import api import six from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import vsphere_operations from ceilometer.i18n import _ opt_group = cfg.OptGroup(name='vmware', title='Options for VMware') OPTS = [ cfg.StrOpt('host_ip', default='', help='IP address of the VMware vSphere host.'), cfg.PortOpt('host_port', default=443, help='Port of the VMware vSphere host.'), cfg.StrOpt('host_username', default='', help='Username of VMware vSphere.'), cfg.StrOpt('host_password', default='', help='Password of VMware vSphere.', secret=True), cfg.StrOpt('ca_file', help='CA bundle file to use in verifying the vCenter server ' 'certificate.'), cfg.BoolOpt('insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. This option is ignored if ' '"ca_file" is set.'), cfg.IntOpt('api_retry_count', default=10, help='Number of times a VMware vSphere API may be retried.'), cfg.FloatOpt('task_poll_interval', default=0.5, help='Sleep time in seconds for polling an ongoing async ' 'task.'), cfg.StrOpt('wsdl_location', help='Optional vim service WSDL location ' 'e.g http:///vimService.wsdl. ' 'Optional over-ride to default location for bug ' 'work-arounds.'), ] cfg.CONF.register_group(opt_group) cfg.CONF.register_opts(OPTS, group=opt_group) VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average' VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average' VC_NETWORK_RX_COUNTER = 'net:received:average' VC_NETWORK_TX_COUNTER = 'net:transmitted:average' VC_DISK_READ_RATE_CNTR = "disk:read:average" VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average" VC_DISK_WRITE_RATE_CNTR = "disk:write:average" VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average" def get_api_session(): api_session = api.VMwareAPISession( cfg.CONF.vmware.host_ip, cfg.CONF.vmware.host_username, cfg.CONF.vmware.host_password, cfg.CONF.vmware.api_retry_count, cfg.CONF.vmware.task_poll_interval, wsdl_loc=cfg.CONF.vmware.wsdl_location, port=cfg.CONF.vmware.host_port, cacert=cfg.CONF.vmware.ca_file, insecure=cfg.CONF.vmware.insecure) return api_session class VsphereInspector(virt_inspector.Inspector): def __init__(self): super(VsphereInspector, self).__init__() self._ops = vsphere_operations.VsphereOperations( get_api_session(), 1000) def inspect_cpu_util(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if vm_moid is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) cpu_util_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_CPU_CONSUMED_CNTR) cpu_util = self._ops.query_vm_aggregate_stats( vm_moid, cpu_util_counter_id, duration) # For this counter vSphere returns values scaled-up by 100, since the # corresponding API can't return decimals, but only longs. # For e.g. if the utilization is 12.34%, the value returned is 1234. # Hence, dividing by 100. 
cpu_util = cpu_util / 100 return virt_inspector.CPUUtilStats(util=cpu_util) def inspect_vnic_rates(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if not vm_moid: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) vnic_stats = {} vnic_ids = set() for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER): net_counter_id = self._ops.get_perf_counter_id(net_counter) vnic_id_to_stats_map = self._ops.query_vm_device_stats( vm_moid, net_counter_id, duration) vnic_stats[net_counter] = vnic_id_to_stats_map vnic_ids.update(six.iterkeys(vnic_id_to_stats_map)) # Stats provided from vSphere are in KB/s, converting it to B/s. for vnic_id in vnic_ids: rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER] .get(vnic_id, 0) * units.Ki) tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER] .get(vnic_id, 0) * units.Ki) stats = virt_inspector.InterfaceRateStats(rx_bytes_rate, tx_bytes_rate) interface = virt_inspector.Interface( name=vnic_id, mac=None, fref=None, parameters=None) yield (interface, stats) def inspect_memory_usage(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if vm_moid is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) mem_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_MEMORY_CONSUMED_CNTR) memory = self._ops.query_vm_aggregate_stats( vm_moid, mem_counter_id, duration) # Stat provided from vSphere is in KB, converting it to MB. memory = memory / units.Ki return virt_inspector.MemoryUsageStats(usage=memory) def inspect_disk_rates(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if not vm_moid: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) disk_stats = {} disk_ids = set() disk_counters = [ VC_DISK_READ_RATE_CNTR, VC_DISK_READ_REQUESTS_RATE_CNTR, VC_DISK_WRITE_RATE_CNTR, VC_DISK_WRITE_REQUESTS_RATE_CNTR ] for disk_counter in disk_counters: disk_counter_id = self._ops.get_perf_counter_id(disk_counter) disk_id_to_stat_map = self._ops.query_vm_device_stats( vm_moid, disk_counter_id, duration) disk_stats[disk_counter] = disk_id_to_stat_map disk_ids.update(six.iterkeys(disk_id_to_stat_map)) for disk_id in disk_ids: def stat_val(counter_name): return disk_stats[counter_name].get(disk_id, 0) disk = virt_inspector.Disk(device=disk_id) # Stats provided from vSphere are in KB/s, converting it to B/s. disk_rate_info = virt_inspector.DiskRateStats( read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki, read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR), write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki, write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR) ) yield(disk, disk_rate_info) ceilometer-6.1.5/ceilometer/compute/virt/vmware/vsphere_operations.py0000664000567000056710000002371713072744705027421 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_vmware import vim_util PERF_MANAGER_TYPE = "PerformanceManager" PERF_COUNTER_PROPERTY = "perfCounter" VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value' # ESXi Servers sample performance data every 20 seconds. 20-second interval # data is called instance data or real-time data. To retrieve instance data, # we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId" # property. In that case the "QueryPerf" method operates as a raw data feed # that bypasses the vCenter database and instead retrieves performance data # from an ESXi host. # The following value is time interval for real-time performance stats # in seconds and it is not configurable. VC_REAL_TIME_SAMPLING_INTERVAL = 20 class VsphereOperations(object): """Class to invoke vSphere APIs calls. vSphere APIs calls are required by various pollsters, collecting data from VMware infrastructure. """ def __init__(self, api_session, max_objects): self._api_session = api_session self._max_objects = max_objects # Mapping between "VM's Nova instance Id" -> "VM's MOID" # In case a VM is deployed by Nova, then its name is instance ID. # So this map essentially has VM names as keys. self._vm_moid_lookup_map = {} # Mapping from full name -> ID, for VC Performance counters self._perf_counter_id_lookup_map = None def _init_vm_moid_lookup_map(self): session = self._api_session result = session.invoke_api(vim_util, "get_objects", session.vim, "VirtualMachine", self._max_objects, [VM_INSTANCE_ID_PROPERTY], False) while result: for vm_object in result.objects: vm_moid = vm_object.obj.value # propSet will be set only if the server provides value if hasattr(vm_object, 'propSet') and vm_object.propSet: vm_instance_id = vm_object.propSet[0].val if vm_instance_id: self._vm_moid_lookup_map[vm_instance_id] = vm_moid result = session.invoke_api(vim_util, "continue_retrieval", session.vim, result) def get_vm_moid(self, vm_instance_id): """Method returns VC MOID of the VM by its NOVA instance ID.""" if vm_instance_id not in self._vm_moid_lookup_map: self._init_vm_moid_lookup_map() return self._vm_moid_lookup_map.get(vm_instance_id, None) def _init_perf_counter_id_lookup_map(self): # Query details of all the performance counters from VC session = self._api_session client_factory = session.vim.client.factory perf_manager = session.vim.service_content.perfManager prop_spec = vim_util.build_property_spec( client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY]) obj_spec = vim_util.build_object_spec( client_factory, perf_manager, None) filter_spec = vim_util.build_property_filter_spec( client_factory, [prop_spec], [obj_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = 1 prop_collector = session.vim.service_content.propertyCollector result = session.invoke_api(session.vim, "RetrievePropertiesEx", prop_collector, specSet=[filter_spec], options=options) perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo # Extract the counter Id for each counter and populate the map self._perf_counter_id_lookup_map = {} for perf_counter_info in perf_counter_infos: counter_group = perf_counter_info.groupInfo.key counter_name = perf_counter_info.nameInfo.key counter_rollup_type = perf_counter_info.rollupType counter_id = perf_counter_info.key counter_full_name = (counter_group + ":" + counter_name + ":" + counter_rollup_type) self._perf_counter_id_lookup_map[counter_full_name] = counter_id def 
get_perf_counter_id(self, counter_full_name): """Method returns the ID of VC performance counter by its full name. A VC performance counter is uniquely identified by the tuple {'Group Name', 'Counter Name', 'Rollup Type'}. It will have an id - counter ID (changes from one VC to another), which is required to query performance stats from that VC. This method returns the ID for a counter, assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'. """ if not self._perf_counter_id_lookup_map: self._init_perf_counter_id_lookup_map() return self._perf_counter_id_lookup_map[counter_full_name] # TODO(akhils@vmware.com) Move this method to common library # when it gets checked-in def query_vm_property(self, vm_moid, property_name): """Method returns the value of specified property for a VM. :param vm_moid: moid of the VM whose property is to be queried :param property_name: path of the property """ vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine") session = self._api_session return session.invoke_api(vim_util, "get_object_property", session.vim, vm_mobj, property_name) def query_vm_aggregate_stats(self, vm_moid, counter_id, duration): """Method queries the aggregated real-time stat value for a VM. This method should be used for aggregate counters. :param vm_moid: moid of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: the aggregated stats value for the counter """ # For aggregate counters, device_name should be "" stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration) # Performance manager provides the aggregated stats value # with device name -> None return stats.get(None, 0) def query_vm_device_stats(self, vm_moid, counter_id, duration): """Method queries the real-time stat values for a VM, for all devices. This method should be used for device(non-aggregate) counters. :param vm_moid: moid of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ # For device counters, device_name should be "*" to get stat values # for all devices. stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration) # For some device counters, in addition to the per device value # the Performance manager also returns the aggregated value. # Just to be consistent, deleting the aggregated value if present. stats.pop(None, None) return stats def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration): """Method queries the real-time stat values for a VM. :param vm_moid: moid of the VM for which stats are needed :param counter_id: id of the perf counter in VC :param device_name: name of the device for which stats are to be queried. For aggregate counters pass empty string (""). For device counters pass "*", if stats are required over all devices. 
:param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ session = self._api_session client_factory = session.vim.client.factory # Construct the QuerySpec metric_id = client_factory.create('ns0:PerfMetricId') metric_id.counterId = counter_id metric_id.instance = device_name query_spec = client_factory.create('ns0:PerfQuerySpec') query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine") query_spec.metricId = [metric_id] query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL # We query all samples which are applicable over the specified duration samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL) if duration and duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1) query_spec.maxSample = samples_cnt perf_manager = session.vim.service_content.perfManager perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager, querySpec=[query_spec]) stat_values = {} if perf_stats: entity_metric = perf_stats[0] sample_infos = entity_metric.sampleInfo if len(sample_infos) > 0: for metric_series in entity_metric.value: # Take the average of all samples to improve the accuracy # of the stat value stat_value = float(sum(metric_series.value)) / samples_cnt device_id = metric_series.id.instance stat_values[device_id] = stat_value return stat_values ceilometer-6.1.5/ceilometer/compute/virt/vmware/__init__.py0000664000567000056710000000000013072744703025213 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/hyperv/0000775000567000056710000000000013072745164023132 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/hyperv/inspector.py0000664000567000056710000001363213072744705025517 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for Hyper-V""" import collections import functools import sys from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_utils import units import six from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector def convert_exceptions(function, exception_map): expected_exceptions = tuple(exception_map.keys()) @functools.wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except expected_exceptions as ex: # exception might be a subclass of an expected exception. for expected in expected_exceptions: if isinstance(ex, expected): raised_exception = exception_map[expected] break exc_info = sys.exc_info() # NOTE(claudiub): Python 3 raises the exception object given as # the second argument in six.reraise. # The original message will be maintained by passing the original # exception. 
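            # six.reraise(tp, value, tb) raises 'value' with traceback 'tb',
            # so the converted exception below keeps the stack of the
            # original os_win failure rather than pointing at this wrapper.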
exc = raised_exception(six.text_type(exc_info[1])) six.reraise(raised_exception, exc, exc_info[2]) return wrapper def decorate_all_methods(decorator, *args, **kwargs): def decorate(cls): for attr in cls.__dict__: class_member = getattr(cls, attr) if callable(class_member): setattr(cls, attr, decorator(class_member, *args, **kwargs)) return cls return decorate exception_conversion_map = collections.OrderedDict([ # NOTE(claudiub): order should be from the most specialized exception type # to the most generic exception type. # (expected_exception, converted_exception) (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException), (os_win_exc.OSWinException, virt_inspector.InspectorException), ]) # NOTE(claudiub): the purpose of the decorator below is to prevent any # os_win exceptions (subclasses of OSWinException) to leak outside of the # HyperVInspector. @decorate_all_methods(convert_exceptions, exception_conversion_map) class HyperVInspector(virt_inspector.Inspector): def __init__(self): super(HyperVInspector, self).__init__() self._utils = utilsfactory.get_metricsutils() self._host_max_cpu_clock = self._compute_host_max_cpu_clock() def _compute_host_max_cpu_clock(self): hostutils = utilsfactory.get_hostutils() # host's number of CPUs and CPU clock speed will not change. cpu_info = hostutils.get_cpus_info() host_cpu_count = len(cpu_info) host_cpu_clock = cpu_info[0]['MaxClockSpeed'] return float(host_cpu_clock * host_cpu_count) def inspect_cpus(self, instance): instance_name = util.instance_name(instance) (cpu_clock_used, cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name) cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock # Nanoseconds cpu_time = (int(uptime * cpu_percent_used) * units.k) return virt_inspector.CPUStats(number=cpu_count, time=cpu_time) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) usage = self._utils.get_memory_metrics(instance_name) return virt_inspector.MemoryUsageStats(usage=usage) def inspect_vnics(self, instance): instance_name = util.instance_name(instance) for vnic_metrics in self._utils.get_vnic_metrics(instance_name): interface = virt_inspector.Interface( name=vnic_metrics["element_name"], mac=vnic_metrics["address"], fref=None, parameters=None) stats = virt_inspector.InterfaceStats( rx_bytes=vnic_metrics['rx_mb'] * units.Mi, rx_packets=0, tx_bytes=vnic_metrics['tx_mb'] * units.Mi, tx_packets=0) yield (interface, stats) def inspect_disks(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_metrics(instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskStats( read_requests=0, # Return bytes read_bytes=disk_metrics['read_mb'] * units.Mi, write_requests=0, write_bytes=disk_metrics['write_mb'] * units.Mi, errors=0) yield (disk, stats) def inspect_disk_latency(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_latency_metrics( instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskLatencyStats( disk_latency=disk_metrics['disk_latency']) yield (disk, stats) def inspect_disk_iops(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_iops_count(instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskIOPSStats( iops_count=disk_metrics['iops_count']) yield (disk, stats) 
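
# NOTE: a minimal, self-contained sketch (not part of the driver) showing how
# decorate_all_methods() + convert_exceptions() surface hypervisor-specific
# failures as the generic inspector exceptions; the Fake* names below are
# illustrative only.
if __name__ == '__main__':
    class FakeOsWinError(Exception):
        pass

    fake_map = collections.OrderedDict([
        (FakeOsWinError, virt_inspector.InspectorException),
    ])

    @decorate_all_methods(convert_exceptions, fake_map)
    class FakeInspector(object):
        def boom(self):
            raise FakeOsWinError('hypervisor call failed')

    try:
        FakeInspector().boom()
    except virt_inspector.InspectorException as e:
        # The os_win-style error arrives as the generic inspector
        # exception, with the original message preserved.
        print('converted: %s' % e)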
ceilometer-6.1.5/ceilometer/compute/virt/hyperv/__init__.py0000664000567000056710000000000013072744703025227 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/virt/__init__.py0000664000567000056710000000000013072744703023712 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/notifications/0000775000567000056710000000000013072745164023502 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/compute/notifications/instance.py0000664000567000056710000000632613072744705025667 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing compute sample messages from notification events. """ import abc import six from ceilometer.agent import plugin_base from ceilometer.compute import notifications from ceilometer.compute import util from ceilometer import sample @six.add_metaclass(abc.ABCMeta) class UserMetadataAwareInstanceNotificationBase( notifications.ComputeNotificationBase): """Consumes notifications containing instance user metadata.""" def process_notification(self, message): instance_properties = self.get_instance_properties(message) if isinstance(instance_properties.get('metadata'), dict): src_metadata = instance_properties['metadata'] del instance_properties['metadata'] util.add_reserved_user_metadata(src_metadata, instance_properties) return self.get_sample(message) def get_instance_properties(self, message): """Retrieve instance properties from notification payload.""" return message['payload'] @abc.abstractmethod def get_sample(self, message): """Derive sample from notification payload.""" class InstanceScheduled(UserMetadataAwareInstanceNotificationBase, plugin_base.NonMetricNotificationBase): event_types = ['scheduler.run_instance.scheduled'] def get_instance_properties(self, message): """Retrieve instance properties from notification payload.""" return message['payload']['request_spec']['instance_properties'] def get_sample(self, message): yield sample.Sample.from_notification( name='instance.scheduled', type=sample.TYPE_DELTA, volume=1, unit='instance', user_id=None, project_id=message['payload']['request_spec'] ['instance_properties']['project_id'], resource_id=message['payload']['instance_id'], message=message) class ComputeInstanceNotificationBase( UserMetadataAwareInstanceNotificationBase): """Convert compute.instance.* notifications into Samples.""" event_types = ['compute.instance.*'] class Instance(ComputeInstanceNotificationBase, plugin_base.NonMetricNotificationBase): def get_sample(self, message): yield sample.Sample.from_notification( name='instance', type=sample.TYPE_GAUGE, unit='instance', volume=1, user_id=message['payload']['user_id'], project_id=message['payload']['tenant_id'], resource_id=message['payload']['instance_id'], message=message) ceilometer-6.1.5/ceilometer/compute/notifications/__init__.py0000664000567000056710000000241713072744705025617 0ustar jenkinsjenkins00000000000000# # Copyright 2013 
Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base OPTS = [ cfg.StrOpt('nova_control_exchange', default='nova', help="Exchange name for Nova notifications."), ] cfg.CONF.register_opts(OPTS) class ComputeNotificationBase(plugin_base.NotificationBase): def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=conf.nova_control_exchange) for topic in self.get_notification_topics(conf)] ceilometer-6.1.5/ceilometer/compute/__init__.py0000664000567000056710000000000013072744703022726 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/service.py0000664000567000056710000000655713072744706021205 0ustar jenkinsjenkins00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import sys from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from ceilometer.conf import defaults from ceilometer import keystone_client from ceilometer import messaging from ceilometer import version OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node, which must be valid in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'), ] cfg.CONF.register_opts(OPTS) API_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='api_workers', help='Number of workers for api, default value is 1.') cfg.CONF.register_opt(API_OPT, 'api') NOTI_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='notification_workers', help='Number of workers for notification service, ' 'default value is 1.') cfg.CONF.register_opt(NOTI_OPT, 'notification') COLL_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='collector_workers', help='Number of workers for collector service. 
' 'default value is 1.') cfg.CONF.register_opt(COLL_OPT, 'collector') keystone_client.register_keystoneauth_opts(cfg.CONF) def prepare_service(argv=None, config_files=None): oslo_i18n.enable_lazy() log.register_options(cfg.CONF) log_levels = (cfg.CONF.default_log_levels + ['stevedore=INFO', 'keystoneclient=INFO', 'neutronclient=INFO']) log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() if argv is None: argv = sys.argv cfg.CONF(argv[1:], project='ceilometer', validate_default_values=True, version=version.version_info.version_string(), default_config_files=config_files) keystone_client.setup_keystoneauth(cfg.CONF) log.setup(cfg.CONF, 'ceilometer') # NOTE(liusheng): guru cannot run with service under apache daemon, so when # ceilometer-api running with mod_wsgi, the argv is [], we don't start # guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) messaging.setup() ceilometer-6.1.5/ceilometer/tests/0000775000567000056710000000000013072745164020317 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/0000775000567000056710000000000013072745164022000 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/config.py0000664000567000056710000000313713072744706023624 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg service_available_group = cfg.OptGroup(name="service_available", title="Available OpenStack Services") ServiceAvailableGroup = [ cfg.BoolOpt('ceilometer', default=True, help="Whether or not Ceilometer is expected to be available"), ] telemetry_group = cfg.OptGroup(name='telemetry', title='Telemetry Service Options') TelemetryGroup = [ cfg.StrOpt('catalog_type', default='metering', help="Catalog type of the Telemetry service."), cfg.StrOpt('endpoint_type', default='publicURL', choices=['public', 'admin', 'internal', 'publicURL', 'adminURL', 'internalURL'], help="The endpoint type to use for the telemetry service."), cfg.BoolOpt('event_enabled', default=True, help="Runs Ceilometer event-related tests"), ] ceilometer-6.1.5/ceilometer/tests/tempest/scenario/0000775000567000056710000000000013072745164023603 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py0000664000567000056710000001325313072744706034502 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
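# NOTE: this scenario assumes the swift proxy pipeline includes the
# ceilometer middleware, so that container/object traffic emits
# notifications which ceilometer records as
# 'storage.objects.incoming.bytes' samples (see _confirm_notifications).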
from oslo_log import log as logging from tempest.common.utils import data_utils from tempest import config from tempest.lib.common.utils import test_utils from tempest import test from ceilometer.tests.tempest.service import client CONF = config.CONF LOG = logging.getLogger(__name__) # Loop for up to 120 seconds waiting on notifications # NOTE(chdent): The choice of 120 seconds is fairly # arbitrary: Long enough to give the notifications the # chance to travel across a highly latent bus but not # so long as to allow excessive latency to never be visible. # TODO(chdent): Ideally this value would come from configuration. NOTIFICATIONS_WAIT = 120 NOTIFICATIONS_SLEEP = 1 class ClientManager(client.Manager): load_clients = [ 'telemetry_client', 'container_client', 'object_client', ] class TestObjectStorageTelemetry(test.BaseTestCase): """Test that swift uses the ceilometer middleware. * create container. * upload a file to the created container. * retrieve the file from the created container. * wait for notifications from ceilometer. """ credentials = ['primary'] client_manager = ClientManager @classmethod def skip_checks(cls): super(TestObjectStorageTelemetry, cls).skip_checks() if not CONF.service_available.swift: skip_msg = ("%s skipped as swift is not available" % cls.__name__) raise cls.skipException(skip_msg) if not CONF.service_available.ceilometer: skip_msg = ("%s skipped as ceilometer is not available" % cls.__name__) raise cls.skipException(skip_msg) @classmethod def setup_credentials(cls): cls.set_network_resources() super(TestObjectStorageTelemetry, cls).setup_credentials() @classmethod def setup_clients(cls): super(TestObjectStorageTelemetry, cls).setup_clients() cls.telemetry_client = cls.os_primary.telemetry_client cls.container_client = cls.os_primary.container_client cls.object_client = cls.os_primary.object_client def _confirm_notifications(self, container_name, obj_name): # NOTE: Loop seeking for appropriate notifications about the containers # and objects sent to swift. def _check_samples(): # NOTE: Return True only if we have notifications about some # containers and some objects and the notifications are about # the expected containers and objects. # Otherwise returning False will case _check_samples to be # called again. results = self.telemetry_client.list_samples( 'storage.objects.incoming.bytes') LOG.debug('got samples %s', results) # Extract container info from samples. containers, objects = [], [] for sample in results: meta = sample['resource_metadata'] if meta.get('container') and meta['container'] != 'None': containers.append(meta['container']) elif (meta.get('target.metadata:container') and meta['target.metadata:container'] != 'None'): containers.append(meta['target.metadata:container']) if meta.get('object') and meta['object'] != 'None': objects.append(meta['object']) elif (meta.get('target.metadata:object') and meta['target.metadata:object'] != 'None'): objects.append(meta['target.metadata:object']) return (container_name in containers and obj_name in objects) self.assertTrue( test_utils.call_until_true(_check_samples, NOTIFICATIONS_WAIT, NOTIFICATIONS_SLEEP), 'Correct notifications were not received after ' '%s seconds.' 
% NOTIFICATIONS_WAIT) def create_container(self): name = data_utils.rand_name('swift-scenario-container') self.container_client.create_container(name) # look for the container to assure it is created self.container_client.list_container_contents(name) LOG.debug('Container %s created' % (name)) self.addCleanup(self.container_client.delete_container, name) return name def upload_object_to_container(self, container_name): obj_name = data_utils.rand_name('swift-scenario-object') obj_data = data_utils.arbitrary_string() self.object_client.create_object(container_name, obj_name, obj_data) self.addCleanup(self.object_client.delete_object, container_name, obj_name) return obj_name @test.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb85') @test.services('object_storage') def test_swift_middleware_notifies(self): container_name = self.create_container() obj_name = self.upload_object_to_container(container_name) self._confirm_notifications(container_name, obj_name) ceilometer-6.1.5/ceilometer/tests/tempest/scenario/__init__.py0000664000567000056710000000000013072744703025700 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/api/0000775000567000056710000000000013072745164022551 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/api/test_telemetry_notification_api.py0000664000567000056710000000567413072744706031610 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
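# NOTE: the metric-name lists referenced below (nova_notifications,
# glance_notifications, glance_v2_notifications) are populated in
# base.BaseTelemetryTest.resource_setup().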
# Change-Id: I14e16a1a7d9813b324ee40545c07f0e88fb637b7 import testtools from ceilometer.tests.tempest.api import base from tempest import config from tempest.lib import decorators from tempest import test CONF = config.CONF class TelemetryNotificationAPITest(base.BaseTelemetryTest): @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caae') @test.services('compute') def test_check_nova_notification(self): body = self.create_server() query = ('resource', 'eq', body['id']) for metric in self.nova_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('04b10bfe-a5dc-47af-b22f-0460426bf499') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v1, "Glance api v1 is disabled") def test_check_glance_v1_notifications(self): body = self.create_image(self.image_client, is_public=False) self.image_client.update_image(body['id'], data='data') query = 'resource', 'eq', body['id'] self.image_client.delete_image(body['id']) for metric in self.glance_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8f') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v2, "Glance api v2 is disabled") def test_check_glance_v2_notifications(self): body = self.create_image(self.image_client_v2, visibility='private') self.image_client_v2.store_image_file(body['id'], "file") self.image_client_v2.show_image_file(body['id']) query = 'resource', 'eq', body['id'] for metric in self.glance_v2_notifications: self.await_samples(metric, query) class TelemetryNotificationAdminAPITest(base.BaseTelemetryAdminTest): @test.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebea') @test.services('compute') @decorators.skip_because(bug='1480490') def test_check_nova_notification_event_and_meter(self): body = self.create_server() if CONF.telemetry.event_enabled: query = ('instance_id', 'eq', body['id']) self.await_events(query) query = ('resource', 'eq', body['id']) for metric in self.nova_notifications: self.await_samples(metric, query) ceilometer-6.1.5/ceilometer/tests/tempest/api/base.py0000664000567000056710000001277513072744706024052 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
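# NOTE: query triples passed to the telemetry client throughout these tests
# take the form (field, operator, value), e.g. ('resource', 'eq', server_id),
# and are encoded as q.field/q.op/q.value request parameters by the client
# (see ceilometer.tests.tempest.service.client.TelemetryClient._helper_list).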
import time from oslo_utils import timeutils from tempest.common import compute from tempest.common.utils import data_utils from tempest import config from tempest import exceptions from tempest.lib import exceptions as lib_exc import tempest.test from ceilometer.tests.tempest.service import client CONF = config.CONF class ClientManager(client.Manager): load_clients = [ 'servers_client', 'compute_networks_client', 'compute_floating_ips_client', 'flavors_client', 'image_client', 'image_client_v2', 'telemetry_client', ] class BaseTelemetryTest(tempest.test.BaseTestCase): """Base test case class for all Telemetry API tests.""" credentials = ['primary'] client_manager = ClientManager @classmethod def skip_checks(cls): super(BaseTelemetryTest, cls).skip_checks() if not CONF.service_available.ceilometer: raise cls.skipException("Ceilometer support is required") @classmethod def setup_credentials(cls): cls.set_network_resources() super(BaseTelemetryTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseTelemetryTest, cls).setup_clients() cls.telemetry_client = cls.os_primary.telemetry_client cls.servers_client = cls.os_primary.servers_client cls.flavors_client = cls.os_primary.flavors_client cls.image_client = cls.os_primary.image_client cls.image_client_v2 = cls.os_primary.image_client_v2 @classmethod def resource_setup(cls): super(BaseTelemetryTest, cls).resource_setup() cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size', 'disk.ephemeral.size'] cls.glance_notifications = ['image.size'] cls.glance_v2_notifications = ['image.download', 'image.serve'] cls.server_ids = [] cls.image_ids = [] @classmethod def create_server(cls): tenant_network = cls.get_tenant_network() body, server = compute.create_test_server( cls.os_primary, tenant_network=tenant_network, name=data_utils.rand_name('ceilometer-instance'), wait_until='ACTIVE') cls.server_ids.append(body['id']) return body @classmethod def create_image(cls, client, **kwargs): body = client.create_image(name=data_utils.rand_name('image'), container_format='bare', disk_format='raw', **kwargs) # TODO(jswarren) Move ['image'] up to initial body value assignment # once both v1 and v2 glance clients include the full response # object. if 'image' in body: body = body['image'] cls.image_ids.append(body['id']) return body @staticmethod def cleanup_resources(method, list_of_ids): for resource_id in list_of_ids: try: method(resource_id) except lib_exc.NotFound: pass @classmethod def resource_cleanup(cls): cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids) cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids) super(BaseTelemetryTest, cls).resource_cleanup() def await_samples(self, metric, query): """This method is to wait for sample to add it to database. 
There are long time delays when using Postgresql (or Mysql) database as ceilometer backend """ timeout = CONF.compute.build_timeout start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: body = self.telemetry_client.list_samples(metric, query) if body: return body time.sleep(CONF.compute.build_interval) raise exceptions.TimeoutException( 'Sample for metric:%s with query:%s has not been added to the ' 'database within %d seconds' % (metric, query, CONF.compute.build_timeout)) class BaseTelemetryAdminTest(BaseTelemetryTest): """Base test case class for admin Telemetry API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseTelemetryAdminTest, cls).setup_clients() cls.telemetry_admin_client = cls.os_adm.telemetry_client def await_events(self, query): timeout = CONF.compute.build_timeout start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: body = self.telemetry_admin_client.list_events(query) if body: return body time.sleep(CONF.compute.build_interval) raise exceptions.TimeoutException( 'Event with query:%s has not been added to the ' 'database within %d seconds' % (query, CONF.compute.build_timeout)) ceilometer-6.1.5/ceilometer/tests/tempest/api/__init__.py0000664000567000056710000000000013072744703024646 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/plugin.py0000664000567000056710000000310313072744706023646 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from tempest import config from tempest.test_discover import plugins import ceilometer from ceilometer.tests.tempest import config as tempest_config class CeilometerTempestPlugin(plugins.TempestPlugin): def load_tests(self): base_path = os.path.split(os.path.dirname( os.path.abspath(ceilometer.__file__)))[0] test_dir = "ceilometer/tests/tempest" full_test_dir = os.path.join(base_path, test_dir) return full_test_dir, base_path def register_opts(self, conf): config.register_opt_group(conf, tempest_config.service_available_group, tempest_config.ServiceAvailableGroup) config.register_opt_group(conf, tempest_config.telemetry_group, tempest_config.TelemetryGroup) def get_opt_lists(self): return [(tempest_config.telemetry_group.name, tempest_config.TelemetryGroup)] ceilometer-6.1.5/ceilometer/tests/tempest/exceptions.py0000664000567000056710000001230613072744706024536 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import testtools class TempestException(Exception): """Base Tempest Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = "An unknown exception occurred" def __init__(self, *args, **kwargs): super(TempestException, self).__init__() try: self._error_string = self.message % kwargs except Exception: # at least get the core message out if something happened self._error_string = self.message if len(args) > 0: # If there is a non-kwarg parameter, assume it's the error # message or reason description and tack it on to the end # of the exception message # Convert all arguments into their string representations... args = ["%s" % arg for arg in args] self._error_string = (self._error_string + "\nDetails: %s" % '\n'.join(args)) def __str__(self): return self._error_string class RestClientException(TempestException, testtools.TestCase.failureException): pass class InvalidConfiguration(TempestException): message = "Invalid Configuration" class InvalidCredentials(TempestException): message = "Invalid Credentials" class InvalidServiceTag(TempestException): message = "Invalid service tag" class InvalidIdentityVersion(TempestException): message = "Invalid version %(identity_version)s of the identity service" class TimeoutException(TempestException): message = "Request timed out" class BuildErrorException(TempestException): message = "Server %(server_id)s failed to build and is in ERROR status" class ImageKilledException(TempestException): message = "Image %(image_id)s 'killed' while waiting for '%(status)s'" class AddImageException(TempestException): message = "Image %(image_id)s failed to become ACTIVE in the allotted time" class VolumeBuildErrorException(TempestException): message = "Volume %(volume_id)s failed to build and is in ERROR status" class VolumeRestoreErrorException(TempestException): message = "Volume %(volume_id)s failed to restore and is in ERROR status" class SnapshotBuildErrorException(TempestException): message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status" class VolumeBackupException(TempestException): message = "Volume backup %(backup_id)s failed and is in ERROR status" class StackBuildErrorException(TempestException): message = ("Stack %(stack_identifier)s is in %(stack_status)s status " "due to '%(stack_status_reason)s'") class EndpointNotFound(TempestException): message = "Endpoint not found" class IdentityError(TempestException): message = "Got identity error" class ServerUnreachable(TempestException): message = "The server is not reachable via the configured network" # NOTE(andreaf) This exception is added here to facilitate the migration # of get_network_from_name and preprov_creds to tempest.lib, and it should # be migrated along with them class InvalidTestResource(TempestException): message = "%(name) is not a valid %(type), or the name is ambiguous" class RFCViolation(RestClientException): message = "RFC Violation" class InvalidHttpSuccessCode(RestClientException): message = "The success code is different than the expected one" class BadRequest(RestClientException): message = "Bad request" class ResponseWithNonEmptyBody(RFCViolation): message = ("RFC Violation! Response with %(status)d HTTP Status Code " "MUST NOT have a body") class ResponseWithEntity(RFCViolation): message = ("RFC Violation! 
Response with 205 HTTP Status Code " "MUST NOT have an entity") class InvalidHTTPResponseHeader(RestClientException): message = "HTTP response header is invalid" class InvalidStructure(TempestException): message = "Invalid structure of table with details" class CommandFailed(Exception): def __init__(self, returncode, cmd, output, stderr): super(CommandFailed, self).__init__() self.returncode = returncode self.cmd = cmd self.stdout = output self.stderr = stderr def __str__(self): return ("Command '%s' returned non-zero exit status %d.\n" "stdout:\n%s\n" "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr)) ceilometer-6.1.5/ceilometer/tests/tempest/service/0000775000567000056710000000000013072745164023440 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/client.py0000664000567000056710000001567213072744706025304 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urllib from tempest import config from tempest.lib.common import rest_client from tempest.lib.services.compute.flavors_client import FlavorsClient from tempest.lib.services.compute.floating_ips_client import FloatingIPsClient from tempest.lib.services.compute.networks_client import NetworksClient from tempest.lib.services.compute.servers_client import ServersClient from tempest import manager from tempest.services.object_storage.container_client import ContainerClient from tempest.services.object_storage.object_client import ObjectClient from ceilometer.tests.tempest.service.images.v1.images_client import \ ImagesClient from ceilometer.tests.tempest.service.images.v2.images_client import \ ImagesClient as ImagesClientV2 CONF = config.CONF class TelemetryClient(rest_client.RestClient): version = '2' uri_prefix = "v2" def deserialize(self, body): return json.loads(body.replace("\n", "")) def serialize(self, body): return json.dumps(body) def create_sample(self, meter_name, sample_list): uri = "%s/meters/%s" % (self.uri_prefix, meter_name) body = self.serialize(sample_list) resp, body = self.post(uri, body) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBody(resp, body) def _helper_list(self, uri, query=None, period=None): uri_dict = {} if query: uri_dict = {'q.field': query[0], 'q.op': query[1], 'q.value': query[2]} if period: uri_dict['period'] = period if uri_dict: uri += "?%s" % urllib.urlencode(uri_dict) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBodyList(resp, body) def list_resources(self, query=None): uri = '%s/resources' % self.uri_prefix return self._helper_list(uri, query) def list_meters(self, query=None): uri = '%s/meters' % self.uri_prefix return self._helper_list(uri, query) def list_statistics(self, meter, period=None, query=None): uri = "%s/meters/%s/statistics" % 
(self.uri_prefix, meter) return self._helper_list(uri, query, period) def list_samples(self, meter_id, query=None): uri = '%s/meters/%s' % (self.uri_prefix, meter_id) return self._helper_list(uri, query) def list_events(self, query=None): uri = '%s/events' % self.uri_prefix return self._helper_list(uri, query) def show_resource(self, resource_id): uri = '%s/resources/%s' % (self.uri_prefix, resource_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBody(resp, body) class Manager(manager.Manager): load_clients = [ 'servers_client', 'compute_networks_client', 'compute_floating_ips_client', 'flavors_client', 'image_client', 'image_client_v2', 'telemetry_client', 'container_client', 'object_client', ] default_params = { 'disable_ssl_certificate_validation': CONF.identity.disable_ssl_certificate_validation, 'ca_certs': CONF.identity.ca_certificates_file, 'trace_requests': CONF.debug.trace_requests } compute_params = { 'service': CONF.compute.catalog_type, 'region': CONF.compute.region or CONF.identity.region, 'endpoint_type': CONF.compute.endpoint_type, 'build_interval': CONF.compute.build_interval, 'build_timeout': CONF.compute.build_timeout, } compute_params.update(default_params) image_params = { 'catalog_type': CONF.image.catalog_type, 'region': CONF.image.region or CONF.identity.region, 'endpoint_type': CONF.image.endpoint_type, 'build_interval': CONF.image.build_interval, 'build_timeout': CONF.image.build_timeout, } image_params.update(default_params) telemetry_params = { 'service': CONF.telemetry.catalog_type, 'region': CONF.identity.region, 'endpoint_type': CONF.telemetry.endpoint_type, } telemetry_params.update(default_params) object_storage_params = { 'service': CONF.object_storage.catalog_type, 'region': CONF.object_storage.region or CONF.identity.region, 'endpoint_type': CONF.object_storage.endpoint_type } object_storage_params.update(default_params) def __init__(self, credentials=None, service=None): super(Manager, self).__init__(credentials) for client in self.load_clients: getattr(self, 'set_%s' % client)() def set_servers_client(self): self.servers_client = ServersClient(self.auth_provider, **self.compute_params) def set_compute_networks_client(self): self.compute_networks_client = NetworksClient(self.auth_provider, **self.compute_params) def set_compute_floating_ips_client(self): self.compute_floating_ips_client = FloatingIPsClient( self.auth_provider, **self.compute_params) def set_flavors_client(self): self.flavors_client = FlavorsClient(self.auth_provider, **self.compute_params) def set_image_client(self): self.image_client = ImagesClient(self.auth_provider, **self.image_params) def set_image_client_v2(self): self.image_client_v2 = ImagesClientV2(self.auth_provider, **self.image_params) def set_telemetry_client(self): self.telemetry_client = TelemetryClient(self.auth_provider, **self.telemetry_params) def set_container_client(self): self.container_client = ContainerClient(self.auth_provider, **self.object_storage_params) def set_object_client(self): self.object_client = ObjectClient(self.auth_provider, **self.object_storage_params) ceilometer-6.1.5/ceilometer/tests/tempest/service/images/0000775000567000056710000000000013072745164024705 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/images/glance_http.py0000664000567000056710000003344213072744706027556 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Originally copied from python-glanceclient import copy import hashlib import posixpath import re import socket import struct import OpenSSL from oslo_log import log as logging import six from six import moves from six.moves import http_client as httplib from six.moves.urllib import parse as urlparse from ceilometer.tests.tempest import exceptions as exc LOG = logging.getLogger(__name__) USER_AGENT = 'tempest' CHUNKSIZE = 1024 * 64 # 64kB TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$') class HTTPClient(object): def __init__(self, auth_provider, filters, **kwargs): self.auth_provider = auth_provider self.filters = filters self.endpoint = auth_provider.base_url(filters) endpoint_parts = urlparse.urlparse(self.endpoint) self.endpoint_scheme = endpoint_parts.scheme self.endpoint_hostname = endpoint_parts.hostname self.endpoint_port = endpoint_parts.port self.connection_class = self._get_connection_class( self.endpoint_scheme) self.connection_kwargs = self._get_connection_kwargs( self.endpoint_scheme, **kwargs) @staticmethod def _get_connection_class(scheme): if scheme == 'https': return VerifiedHTTPSConnection else: return httplib.HTTPConnection @staticmethod def _get_connection_kwargs(scheme, **kwargs): _kwargs = {'timeout': float(kwargs.get('timeout', 600))} if scheme == 'https': _kwargs['ca_certs'] = kwargs.get('ca_certs', None) _kwargs['cert_file'] = kwargs.get('cert_file', None) _kwargs['key_file'] = kwargs.get('key_file', None) _kwargs['insecure'] = kwargs.get('insecure', False) _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True) return _kwargs def _get_connection(self): _class = self.connection_class try: return _class(self.endpoint_hostname, self.endpoint_port, **self.connection_kwargs) except httplib.InvalidURL: raise exc.EndpointNotFound def _http_request(self, url, method, **kwargs): """Send an http request with the specified characteristics. Wrapper around httplib.HTTP(S)Connection.request to handle tasks such as setting headers and error handling. """ # Copy the kwargs so we can reuse the original in case of redirects kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {})) kwargs['headers'].setdefault('User-Agent', USER_AGENT) self._log_request(method, url, kwargs['headers']) conn = self._get_connection() try: url_parts = urlparse.urlparse(url) conn_url = posixpath.normpath(url_parts.path) LOG.debug('Actual Path: {path}'.format(path=conn_url)) if kwargs['headers'].get('Transfer-Encoding') == 'chunked': conn.putrequest(method, conn_url) for header, value in kwargs['headers'].items(): conn.putheader(header, value) conn.endheaders() chunk = kwargs['body'].read(CHUNKSIZE) # Chunk it, baby... 
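                # Each chunk is framed as '<size-in-hex>\r\n<payload>\r\n',
                # and a zero-length chunk ('0\r\n\r\n') terminates the
                # stream, per the HTTP/1.1 chunked transfer-coding.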
while chunk: conn.send('%x\r\n%s\r\n' % (len(chunk), chunk)) chunk = kwargs['body'].read(CHUNKSIZE) conn.send('0\r\n\r\n') else: conn.request(method, conn_url, **kwargs) resp = conn.getresponse() except socket.gaierror as e: message = ("Error finding address for %(url)s: %(e)s" % {'url': url, 'e': e}) raise exc.EndpointNotFound(message) except (socket.error, socket.timeout) as e: message = ("Error communicating with %(endpoint)s %(e)s" % {'endpoint': self.endpoint, 'e': e}) raise exc.TimeoutException(message) body_iter = ResponseBodyIterator(resp) # Read body into string if it isn't obviously image data if resp.getheader('content-type', None) != 'application/octet-stream': body_str = ''.join([body_chunk for body_chunk in body_iter]) body_iter = six.StringIO(body_str) self._log_response(resp, None) else: self._log_response(resp, body_iter) return resp, body_iter def _log_request(self, method, url, headers): LOG.info('Request: ' + method + ' ' + url) if headers: headers_out = headers if 'X-Auth-Token' in headers and headers['X-Auth-Token']: token = headers['X-Auth-Token'] if len(token) > 64 and TOKEN_CHARS_RE.match(token): headers_out = headers.copy() headers_out['X-Auth-Token'] = "<Token omitted>" LOG.info('Request Headers: ' + str(headers_out)) def _log_response(self, resp, body): status = str(resp.status) LOG.info("Response Status: " + status) if resp.getheaders(): LOG.info('Response Headers: ' + str(resp.getheaders())) if body: str_body = str(body) length = len(body) LOG.info('Response Body: ' + str_body[:2048]) if length >= 2048: LOG.debug("Large body (%d) md5 summary: %s", length, hashlib.md5(str_body).hexdigest()) def raw_request(self, method, url, **kwargs): kwargs.setdefault('headers', {}) kwargs['headers'].setdefault('Content-Type', 'application/octet-stream') if 'body' in kwargs: if (hasattr(kwargs['body'], 'read') and method.lower() in ('post', 'put')): # We use 'Transfer-Encoding: chunked' because # body size may not always be known in advance. kwargs['headers']['Transfer-Encoding'] = 'chunked' # Decorate the request with auth req_url, kwargs['headers'], kwargs['body'] = \ self.auth_provider.auth_request( method=method, url=url, headers=kwargs['headers'], body=kwargs.get('body', None), filters=self.filters) return self._http_request(req_url, method, **kwargs) class OpenSSLConnectionDelegator(object): """An OpenSSL.SSL.Connection delegator. Supplies an additional 'makefile' method which httplib requires and is not present in OpenSSL.SSL.Connection. Note: Since it is not possible to inherit from OpenSSL.SSL.Connection a delegator must be used. """ def __init__(self, *args, **kwargs): self.connection = OpenSSL.SSL.Connection(*args, **kwargs) def __getattr__(self, name): return getattr(self.connection, name) def makefile(self, *args, **kwargs): # Ensure the socket is closed when this file is closed kwargs['close'] = True return socket._fileobject(self.connection, *args, **kwargs) class VerifiedHTTPSConnection(httplib.HTTPSConnection): """Extended HTTPSConnection which uses OpenSSL library for enhanced SSL Note: Much of this functionality can eventually be replaced with native Python 3.3 code.
""" def __init__(self, host, port=None, key_file=None, cert_file=None, ca_certs=None, timeout=None, insecure=False, ssl_compression=True): httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = key_file self.cert_file = cert_file self.timeout = timeout self.insecure = insecure self.ssl_compression = ssl_compression self.ca_certs = ca_certs self.setcontext() @staticmethod def host_matches_cert(host, x509): """Verify that the x509 certificate we have received from 'host' Identifies the server we are connecting to, ie that the certificate's Common Name or a Subject Alternative Name matches 'host'. """ # First see if we can match the CN if x509.get_subject().commonName == host: return True # Also try Subject Alternative Names for a match san_list = None for i in moves.xrange(x509.get_extension_count()): ext = x509.get_extension(i) if ext.get_short_name() == 'subjectAltName': san_list = str(ext) for san in ''.join(san_list.split()).split(','): if san == "DNS:%s" % host: return True # Server certificate does not match host msg = ('Host "%s" does not match x509 certificate contents: ' 'CommonName "%s"' % (host, x509.get_subject().commonName)) if san_list is not None: msg = msg + ', subjectAltName "%s"' % san_list raise exc.SSLCertificateError(msg) def verify_callback(self, connection, x509, errnum, depth, preverify_ok): if x509.has_expired(): msg = "SSL Certificate expired on '%s'" % x509.get_notAfter() raise exc.SSLCertificateError(msg) if depth == 0 and preverify_ok is True: # We verify that the host matches against the last # certificate in the chain return self.host_matches_cert(self.host, x509) else: # Pass through OpenSSL's default result return preverify_ok def setcontext(self): """Set up the OpenSSL context.""" self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) if self.ssl_compression is False: self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION if self.insecure is not True: self.context.set_verify(OpenSSL.SSL.VERIFY_PEER, self.verify_callback) else: self.context.set_verify(OpenSSL.SSL.VERIFY_NONE, self.verify_callback) if self.cert_file: try: self.context.use_certificate_file(self.cert_file) except Exception as e: msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e) raise exc.SSLConfigurationError(msg) if self.key_file is None: # We support having key and cert in same file try: self.context.use_privatekey_file(self.cert_file) except Exception as e: msg = ('No key file specified and unable to load key ' 'from "%s" %s' % (self.cert_file, e)) raise exc.SSLConfigurationError(msg) if self.key_file: try: self.context.use_privatekey_file(self.key_file) except Exception as e: msg = 'Unable to load key from "%s" %s' % (self.key_file, e) raise exc.SSLConfigurationError(msg) if self.ca_certs: try: self.context.load_verify_locations(self.ca_certs) except Exception as e: msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e) raise exc.SSLConfigurationError(msg) else: self.context.set_default_verify_paths() def connect(self): """Connect to SSL port and apply per-connection parameters.""" try: addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) except OSError as msg: raise exc.RestClientException(msg) for res in addresses: af, socktype, proto, canonname, sa = res sock = socket.socket(af, socket.SOCK_STREAM) if self.timeout is not None: # '0' microseconds sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack('LL', self.timeout, 0)) self.sock = 
OpenSSLConnectionDelegator(self.context, sock) try: self.sock.connect(sa) except OSError as msg: if self.sock: self.sock = None continue break if self.sock is None: # Happen only when all results have failed. raise exc.RestClientException('Cannot connect to %s' % self.host) def close(self): if self.sock: # Remove the reference to the socket but don't close it yet. # Response close will close both socket and associated # file. Closing socket too soon will cause response # reads to fail with socket IO error 'Bad file descriptor'. self.sock = None httplib.HTTPSConnection.close(self) class ResponseBodyIterator(object): """A class that acts as an iterator over an HTTP response.""" def __init__(self, resp): self.resp = resp def __iter__(self): while True: yield self.next() def next(self): chunk = self.resp.read(CHUNKSIZE) if chunk: return chunk else: raise StopIteration() ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v1/0000775000567000056710000000000013072745164025233 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v1/images_client.py0000664000567000056710000002261713072744706030421 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import errno import os from oslo_log import log as logging from oslo_serialization import jsonutils as json import six from six.moves.urllib import parse as urllib from tempest.lib.common import rest_client from tempest.lib import exceptions as lib_exc from ceilometer.tests.tempest.service.images import glance_http LOG = logging.getLogger(__name__) class ImagesClient(rest_client.RestClient): def __init__(self, auth_provider, catalog_type, region, **kwargs): super(ImagesClient, self).__init__( auth_provider, catalog_type, region, **kwargs) self._http = None self.dscv = kwargs.get("disable_ssl_certificate_validation") self.ca_certs = kwargs.get("ca_certs") def _image_meta_from_headers(self, headers): meta = {'properties': {}} for key, value in six.iteritems(headers): if key.startswith('x-image-meta-property-'): _key = key[22:] meta['properties'][_key] = value elif key.startswith('x-image-meta-'): _key = key[13:] meta[_key] = value for key in ['is_public', 'protected', 'deleted']: if key in meta: meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes', '1') for key in ['size', 'min_ram', 'min_disk']: if key in meta: try: meta[key] = int(meta[key]) except ValueError: pass return meta def _image_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) copy_from = fields_copy.pop('copy_from', None) if copy_from is not None: headers['x-glance-api-copy-from'] = copy_from for key, value in six.iteritems(fields_copy.pop('properties', {})): headers['x-image-meta-property-%s' % key] = str(value) for key, value in six.iteritems(fields_copy.pop('api', {})): headers['x-glance-api-property-%s' % key] = str(value) for key, value in six.iteritems(fields_copy): headers['x-image-meta-%s' % key] = str(value) return headers def 
_get_file_size(self, obj): """Analyze file-like object and attempt to determine its size. :param obj: file-like object, typically redirected from stdin. :retval The file's size or None if it cannot be determined. """ # For large images, we need to supply the size of the # image file. See LP Bugs #827660 and #845788. if hasattr(obj, 'seek') and hasattr(obj, 'tell'): try: obj.seek(0, os.SEEK_END) obj_size = obj.tell() obj.seek(0) return obj_size except IOError as e: if e.errno == errno.ESPIPE: # Illegal seek. This means the user is trying # to pipe image data to the client, e.g. # echo testdata | bin/glance add blah..., or # that stdin is empty, or that a file-like # object which doesn't support 'seek/tell' has # been supplied. return None else: raise else: # Cannot determine size of input image return None def _get_http(self): return glance_http.HTTPClient(auth_provider=self.auth_provider, filters=self.filters, insecure=self.dscv, ca_certs=self.ca_certs) def _create_with_data(self, headers, data): resp, body_iter = self.http.raw_request('POST', '/v1/images', headers=headers, body=data) self._error_checker(resp, body_iter) body = json.loads(''.join([c for c in body_iter])) return rest_client.ResponseBody(resp, body) def _update_with_data(self, image_id, headers, data): url = '/v1/images/%s' % image_id resp, body_iter = self.http.raw_request('PUT', url, headers=headers, body=data) self._error_checker(resp, body_iter) body = json.loads(''.join([c for c in body_iter])) return rest_client.ResponseBody(resp, body) @property def http(self): if self._http is None: self._http = self._get_http() return self._http def create_image(self, **kwargs): headers = {} data = kwargs.pop('data', None) headers.update(self._image_meta_to_headers(kwargs)) if data is not None: return self._create_with_data(headers, data) resp, body = self.post('v1/images', None, headers) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_image(self, image_id, **kwargs): headers = {} data = kwargs.pop('data', None) headers.update(self._image_meta_to_headers(kwargs)) if data is not None: return self._update_with_data(image_id, headers, data) url = 'v1/images/%s' % image_id resp, body = self.put(url, None, headers) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_image(self, image_id): url = 'v1/images/%s' % image_id resp, body = self.delete(url) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def list_images(self, detail=False, **kwargs): """Return a list of all images filtered by input parameters. Available params: see http://developer.openstack.org/ api-ref-image-v1.html#listImage-v1 Most parameters except the following are passed to the API without any changes. 
:param changes_since: The name is changed to changes-since """ url = 'v1/images' if detail: url += '/detail' properties = kwargs.pop('properties', {}) for key, value in six.iteritems(properties): kwargs['property-%s' % key] = value if kwargs.get('changes_since'): kwargs['changes-since'] = kwargs.pop('changes_since') if len(kwargs) > 0: url += '?%s' % urllib.urlencode(kwargs) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def check_image(self, image_id): """Check image metadata.""" url = 'v1/images/%s' % image_id resp, __ = self.head(url) self.expected_success(200, resp.status) body = self._image_meta_from_headers(resp) return rest_client.ResponseBody(resp, body) def show_image(self, image_id): """Get image details plus the image itself.""" url = 'v1/images/%s' % image_id resp, body = self.get(url) self.expected_success(200, resp.status) return rest_client.ResponseBodyData(resp, body) def is_resource_deleted(self, id): try: if self.check_image(id)['status'] == 'deleted': return True except lib_exc.NotFound: return True return False @property def resource_type(self): """Returns the primary type of resource this client works with.""" return 'image_meta' def list_image_members(self, image_id): url = 'v1/images/%s/members' % image_id resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_shared_images(self, tenant_id): """List shared images with the specified tenant""" url = 'v1/shared-images/%s' % tenant_id resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def add_member(self, member_id, image_id, **kwargs): """Add a member to an image. Available params: see http://developer.openstack.org/ api-ref-image-v1.html#addMember-v1 """ url = 'v1/images/%s/members/%s' % (image_id, member_id) body = json.dumps({'member': kwargs}) resp, __ = self.put(url, body) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) def delete_member(self, member_id, image_id): url = 'v1/images/%s/members/%s' % (image_id, member_id) resp, __ = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v1/__init__.py0000664000567000056710000000000013072744706027333 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v2/0000775000567000056710000000000013072745164025234 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v2/images_client.py0000664000567000056710000002173513072744706030422 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
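# --- Editor's sketch (assumptions flagged inline): a standalone rendering
# of the query-string translation performed by the v1 ImagesClient's
# list_images above. 'properties' entries become 'property-<key>'
# parameters, and the Python keyword 'changes_since' is renamed to the
# API's 'changes-since'.
from six.moves.urllib import parse as urllib


def build_list_images_url(detail=False, **kwargs):
    # Hypothetical helper mirroring the method above, outside the class.
    url = 'v1/images'
    if detail:
        url += '/detail'
    properties = kwargs.pop('properties', {})
    for key, value in properties.items():
        kwargs['property-%s' % key] = value
    if kwargs.get('changes_since'):
        kwargs['changes-since'] = kwargs.pop('changes_since')
    if kwargs:
        url += '?%s' % urllib.urlencode(kwargs)
    return url

# build_list_images_url(properties={'ramdisk_id': 'abc'},
#                       changes_since='2016-01-01')
# -> 'v1/images?property-ramdisk_id=abc&changes-since=2016-01-01'
# (parameter order may vary with dict ordering)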
from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urllib from tempest.lib.common import rest_client from tempest.lib import exceptions as lib_exc from ceilometer.tests.tempest.service.images import glance_http class ImagesClient(rest_client.RestClient): def __init__(self, auth_provider, catalog_type, region, **kwargs): super(ImagesClient, self).__init__( auth_provider, catalog_type, region, **kwargs) self._http = None self.dscv = kwargs.get("disable_ssl_certificate_validation") self.ca_certs = kwargs.get("ca_certs") def _get_http(self): return glance_http.HTTPClient(auth_provider=self.auth_provider, filters=self.filters, insecure=self.dscv, ca_certs=self.ca_certs) @property def http(self): if self._http is None: self._http = self._get_http() return self._http def update_image(self, image_id, patch): """Update an image. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#updateImage-v2 """ data = json.dumps(patch) headers = {"Content-Type": "application/openstack-images-v2.0" "-json-patch"} resp, body = self.patch('v2/images/%s' % image_id, data, headers) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def create_image(self, **kwargs): """Create an image. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#createImage-v2 """ data = json.dumps(kwargs) resp, body = self.post('v2/images', data) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def deactivate_image(self, image_id): url = 'v2/images/%s/actions/deactivate' % image_id resp, body = self.post(url, None) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def reactivate_image(self, image_id): url = 'v2/images/%s/actions/reactivate' % image_id resp, body = self.post(url, None) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def delete_image(self, image_id): url = 'v2/images/%s' % image_id resp, _ = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) def list_images(self, params=None): url = 'v2/images' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_image(self, image_id): url = 'v2/images/%s' % image_id resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def is_resource_deleted(self, id): try: self.show_image(id) except lib_exc.NotFound: return True return False @property def resource_type(self): """Returns the primary type of resource this client works with.""" return 'image' def store_image_file(self, image_id, data): url = 'v2/images/%s/file' % image_id headers = {'Content-Type': 'application/octet-stream'} resp, body = self.http.raw_request('PUT', url, headers=headers, body=data) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def show_image_file(self, image_id): url = 'v2/images/%s/file' % image_id resp, body = self.get(url) self.expected_success(200, resp.status) return rest_client.ResponseBodyData(resp, body) def add_image_tag(self, image_id, tag): url = 'v2/images/%s/tags/%s' % (image_id, tag) resp, body = self.put(url, body=None) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def delete_image_tag(self, image_id, tag): 
url = 'v2/images/%s/tags/%s' % (image_id, tag) resp, _ = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) def list_image_members(self, image_id): url = 'v2/images/%s/members' % image_id resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def create_image_member(self, image_id, **kwargs): """Create an image member. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#createImageMember-v2 """ url = 'v2/images/%s/members' % image_id data = json.dumps(kwargs) resp, body = self.post(url, data) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_image_member(self, image_id, member_id, **kwargs): """Update an image member. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#updateImageMember-v2 """ url = 'v2/images/%s/members/%s' % (image_id, member_id) data = json.dumps(kwargs) resp, body = self.put(url, data) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_image_member(self, image_id, member_id): url = 'v2/images/%s/members/%s' % (image_id, member_id) resp, body = self.get(url) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, json.loads(body)) def delete_image_member(self, image_id, member_id): url = 'v2/images/%s/members/%s' % (image_id, member_id) resp, _ = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) def show_schema(self, schema): url = 'v2/schemas/%s' % schema resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_resource_types(self): url = '/v2/metadefs/resource_types' resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def create_namespace(self, **kwargs): """Create a namespace. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#createNamespace-v2 """ data = json.dumps(kwargs) resp, body = self.post('/v2/metadefs/namespaces', data) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_namespace(self, namespace): url = '/v2/metadefs/namespaces/%s' % namespace resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_namespace(self, namespace, **kwargs): """Update a namespace. Available params: see http://developer.openstack.org/ api-ref-image-v2.html#updateNamespace-v2 """ # NOTE: On Glance API, we need to pass namespace on both URI # and a request body. 
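# Editor's note (added comment): e.g. updating namespace "ns1" with
# kwargs {'description': 'd'} sends PUT /v2/metadefs/namespaces/ns1 with
# body {"namespace": "ns1", "description": "d"} -- the namespace name
# appears in both the URI and the request body, as the NOTE above says.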
params = {'namespace': namespace} params.update(kwargs) data = json.dumps(params) url = '/v2/metadefs/namespaces/%s' % namespace resp, body = self.put(url, body=data) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_namespace(self, namespace): url = '/v2/metadefs/namespaces/%s' % namespace resp, _ = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp) ceilometer-6.1.5/ceilometer/tests/tempest/service/images/v2/__init__.py0000664000567000056710000000000013072744706027334 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/images/__init__.py0000664000567000056710000000000013072744706027005 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/service/__init__.py0000664000567000056710000000000013072744703025535 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/tempest/__init__.py0000664000567000056710000000000013072744703024075 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/mocks.py0000664000567000056710000000667413072744703022020 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import happybase class MockHBaseTable(happybase.Table): def __init__(self, name, connection, data_prefix): # data_prefix is added to all rows which are written # in this test. 
It allows to divide data from different tests self.data_prefix = data_prefix # We create happybase Table with prefix from # CEILOMETER_TEST_HBASE_TABLE_PREFIX prefix = os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", 'test') separator = os.getenv( "CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", '_') super(MockHBaseTable, self).__init__( "%s%s%s" % (prefix, separator, name), connection) def put(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).put(row, *args, **kwargs) def scan(self, row_start=None, row_stop=None, row_prefix=None, columns=None, filter=None, timestamp=None, include_timestamp=False, batch_size=10, scan_batching=None, limit=None, sorted_columns=False): # Add data prefix for row parameters # row_prefix could not be combined with row_start or row_stop if not row_start and not row_stop: row_prefix = self.data_prefix + (row_prefix or "") row_start = None row_stop = None elif row_start and not row_stop: # Adding data_prefix to row_start and row_stop does not work # if it looks like row_start = %data_prefix%foo, # row_stop = %data_prefix, because row_start > row_stop filter = self._update_filter_row(filter) row_start = self.data_prefix + row_start else: row_start = self.data_prefix + (row_start or "") row_stop = self.data_prefix + (row_stop or "") gen = super(MockHBaseTable, self).scan(row_start, row_stop, row_prefix, columns, filter, timestamp, include_timestamp, batch_size, scan_batching, limit, sorted_columns) data_prefix_len = len(self.data_prefix) # Restore original row format for row, data in gen: yield (row[data_prefix_len:], data) def row(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).row(row, *args, **kwargs) def delete(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).delete(row, *args, **kwargs) def _update_filter_row(self, filter): if filter: return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter) else: return "PrefixFilter(%s)" % self.data_prefix ceilometer-6.1.5/ceilometer/tests/functional/0000775000567000056710000000000013072745164022461 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/hooks/0000775000567000056710000000000013072745164023604 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/hooks/post_test_hook.sh0000775000567000056710000000343513072744706027215 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. 
function generate_testr_results { if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz fi } export CEILOMETER_DIR="$BASE/new/ceilometer" # Go to the ceilometer dir cd $CEILOMETER_DIR if [[ -z "$STACK_USER" ]]; then export STACK_USER=stack fi sudo chown -R $STACK_USER:stack $CEILOMETER_DIR # Run tests echo "Running ceilometer functional test suite" set +e # NOTE(ityaptin) Expected a script param which contains a backend name CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -efunctional EXIT_CODE=$? set -e # Collect and parse result generate_testr_results exit $EXIT_CODE ceilometer-6.1.5/ceilometer/tests/functional/publisher/0000775000567000056710000000000013072745164024456 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/publisher/test_direct.py0000664000567000056710000000651613072744706027352 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
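# --- Editor's sketch (not part of the original tree): how the direct://
# publisher exercised by the tests below is constructed. Only calls that
# appear in the test module itself are used here.
from oslo_utils import netutils

from ceilometer.publisher import direct

parsed_url = netutils.urlsplit('direct://')
publisher = direct.DirectPublisher(parsed_url)
# publisher.publish_samples(None, samples) would then persist the samples
# through the storage connection configured under [database]/connection.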
"""Tests for ceilometer/publisher/direct.py """ import datetime import uuid from oslo_utils import netutils from ceilometer.event.storage import models as event from ceilometer.publisher import direct from ceilometer import sample from ceilometer.tests import db as tests_db class TestDirectPublisher(tests_db.TestBase): resource_id = str(uuid.uuid4()) test_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_direct_publisher(self): """Test samples are saved.""" self.CONF.set_override('connection', self.db_manager.url, group='database') parsed_url = netutils.urlsplit('direct://') publisher = direct.DirectPublisher(parsed_url) publisher.publish_samples(None, self.test_data) meters = list(self.conn.get_meters(resource=self.resource_id)) names = sorted([meter.name for meter in meters]) self.assertEqual(3, len(meters), 'There should be 3 samples') self.assertEqual(['alpha', 'beta', 'gamma'], names) class TestEventDirectPublisher(tests_db.TestBase): test_data = [event.Event(message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5)] def test_direct_publisher(self): parsed_url = netutils.urlsplit('direct://') publisher = direct.DirectPublisher(parsed_url) publisher.publish_events(None, self.test_data) e_types = list(self.event_conn.get_event_types()) self.assertEqual(5, len(e_types)) self.assertEqual(['event_%d' % i for i in range(0, 5)], sorted(e_types)) ceilometer-6.1.5/ceilometer/tests/functional/publisher/__init__.py0000664000567000056710000000000013072744703026553 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/api/0000775000567000056710000000000013072745164023232 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/api/v2/0000775000567000056710000000000013072745164023561 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py0000664000567000056710000001714613072744703035231 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing raw events. 
""" import datetime import mock from oslo_utils import timeutils from ceilometer.storage import models from ceilometer.tests.functional.api import v2 class TestComputeDurationByResource(v2.FunctionalTest): def setUp(self): super(TestComputeDurationByResource, self).setUp() # Create events relative to the range and pretend # that the intervening events exist. self.early1 = datetime.datetime(2012, 8, 27, 7, 0) self.early2 = datetime.datetime(2012, 8, 27, 17, 0) self.start = datetime.datetime(2012, 8, 28, 0, 0) self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) self.end = datetime.datetime(2012, 8, 28, 23, 59) self.late1 = datetime.datetime(2012, 8, 29, 9, 0) self.late2 = datetime.datetime(2012, 8, 29, 19, 0) def _patch_get_interval(self, start, end): def get_interval(sample_filter, period, groupby, aggregate): self.assertIsNotNone(sample_filter.start_timestamp) self.assertIsNotNone(sample_filter.end_timestamp) if (sample_filter.start_timestamp > end or sample_filter.end_timestamp < start): return [] duration_start = max(sample_filter.start_timestamp, start) duration_end = min(sample_filter.end_timestamp, end) duration = timeutils.delta_seconds(duration_start, duration_end) return [ models.Statistics( unit='', min=0, max=0, avg=0, sum=0, count=0, period=None, period_start=None, period_end=None, duration=duration, duration_start=duration_start, duration_end=duration_end, groupby=None, ) ] return mock.patch.object(type(self.conn), 'get_meter_statistics', side_effect=get_interval) def _invoke_api(self): return self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'ge', 'value': self.start.isoformat()}, {'field': 'timestamp', 'op': 'le', 'value': self.end.isoformat()}, {'field': 'search_offset', 'value': 10}]) def test_before_range(self): with self._patch_get_interval(self.early1, self.early2): data = self._invoke_api() self.assertEqual([], data) def _assert_times_match(self, actual, expected): if actual: actual = timeutils.parse_isotime(actual) actual = actual.replace(tzinfo=None) self.assertEqual(expected, actual) def test_overlap_range_start(self): with self._patch_get_interval(self.early1, self.middle1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.start) self._assert_times_match(data[0]['duration_end'], self.middle1) self.assertEqual(8 * 60 * 60, data[0]['duration']) def test_within_range(self): with self._patch_get_interval(self.middle1, self.middle2): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle1) self._assert_times_match(data[0]['duration_end'], self.middle2) self.assertEqual(10 * 60 * 60, data[0]['duration']) def test_within_range_zero_duration(self): with self._patch_get_interval(self.middle1, self.middle1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle1) self._assert_times_match(data[0]['duration_end'], self.middle1) self.assertEqual(0, data[0]['duration']) def test_overlap_range_end(self): with self._patch_get_interval(self.middle2, self.late1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle2) self._assert_times_match(data[0]['duration_end'], self.end) self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) def test_after_range(self): with self._patch_get_interval(self.late1, self.late2): data = self._invoke_api() self.assertEqual([], data) def test_without_end_timestamp(self): statistics = [ models.Statistics( unit=None, count=0, 
min=None, max=None, avg=None, duration=None, duration_start=self.late1, duration_end=self.late2, sum=0, period=None, period_start=None, period_end=None, groupby=None, ) ] with mock.patch.object(type(self.conn), 'get_meter_statistics', return_value=statistics): data = self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'ge', 'value': self.late1.isoformat()}, {'field': 'resource_id', 'value': 'resource-id'}, {'field': 'search_offset', 'value': 10}]) self._assert_times_match(data[0]['duration_start'], self.late1) self._assert_times_match(data[0]['duration_end'], self.late2) def test_without_start_timestamp(self): statistics = [ models.Statistics( unit=None, count=0, min=None, max=None, avg=None, duration=None, duration_start=self.early1, duration_end=self.early2, sum=0, period=None, period_start=None, period_end=None, groupby=None, ) ] with mock.patch.object(type(self.conn), 'get_meter_statistics', return_value=statistics): data = self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'le', 'value': self.early2.isoformat()}, {'field': 'resource_id', 'value': 'resource-id'}, {'field': 'search_offset', 'value': 10}]) self._assert_times_match(data[0]['duration_start'], self.early1) self._assert_times_match(data[0]['duration_end'], self.early2) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py0000664000567000056710000023651013072744706031442 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
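# --- Editor's sketch (hypothetical helper, not part of the test suite):
# the statistics tests below all query GET /v2/meters/<meter>/statistics
# with a list of {'field', 'op', 'value'} constraints, optionally adding
# 'period' and 'groupby' arguments.
def time_window_query(start, end):
    # 'ge'/'le' bound the sample timestamps, exactly as the tests below do.
    return [
        {'field': 'timestamp', 'op': 'ge', 'value': start},
        {'field': 'timestamp', 'op': 'le', 'value': end},
    ]

# e.g. self.get_json('/meters/volume.size/statistics',
#                    q=time_window_query('2012-09-25T11:30:00',
#                                        '2012-09-25T11:32:00'))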
"""Test events statistics retrieval.""" import datetime from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests import db as tests_db from ceilometer.tests.functional.api import v2 class TestMaxProjectVolume(v2.FunctionalTest): PATH = '/meters/volume.size/statistics' def setUp(self): super(TestMaxProjectVolume, self).setUp() for i in range(3): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id-%s' % i, timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_no_time_bounds(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }]) self.assertEqual(7, data[0]['max']) self.assertEqual(3, data[0]['count']) def test_start_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, ]) self.assertEqual(7, data[0]['max']) self.assertEqual(2, data[0]['count']) def test_start_timestamp_after(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T12:34:00', }, ]) self.assertEqual([], data) def test_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:30:00', }, ]) self.assertEqual(5, data[0]['max']) self.assertEqual(1, data[0]['count']) def test_end_timestamp_before(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T09:54:00', }, ]) self.assertEqual([], data) def test_start_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:32:00', }, ]) self.assertEqual(6, data[0]['max']) self.assertEqual(1, data[0]['count']) class TestMaxResourceVolume(v2.FunctionalTest): PATH = '/meters/volume.size/statistics' def setUp(self): super(TestMaxResourceVolume, self).setUp() for i in range(3): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_no_time_bounds(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }]) self.assertEqual(7, data[0]['max']) self.assertEqual(3, data[0]['count']) def test_no_time_bounds_with_period(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id'}], period=3600) self.assertEqual(3, len(data)) self.assertEqual(set([u'2012-09-25T10:30:00', u'2012-09-25T12:32:00', u'2012-09-25T11:31:00']), set(x['duration_start'] for x in data)) self.assertEqual(3600, data[0]['period']) self.assertEqual(set([u'2012-09-25T10:30:00', u'2012-09-25T11:30:00', u'2012-09-25T12:30:00']), set(x['period_start'] for x in data)) def test_period_with_negative_value(self): resp = self.get_json(self.PATH, expect_errors=True, 
q=[{'field': 'resource_id', 'value': 'resource-id'}], period=-1) self.assertEqual(400, resp.status_code) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') def test_period_with_large_value(self): resp = self.get_json(self.PATH, expect_errors=True, q=[{'field': 'user_id', 'value': 'user-id'}], period=10000000000000) self.assertEqual(400, resp.status_code) self.assertIn(b"Invalid period", resp.body) def test_start_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, ]) self.assertEqual(7, data[0]['max']) self.assertEqual(2, data[0]['count']) def test_start_timestamp_after(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T12:34:00', }, ]) self.assertEqual([], data) def test_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:30:00', }, ]) self.assertEqual(5, data[0]['max']) self.assertEqual(1, data[0]['count']) def test_end_timestamp_before(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T09:54:00', }, ]) self.assertEqual([], data) def test_start_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:32:00', }, ]) self.assertEqual(6, data[0]['max']) self.assertEqual(1, data[0]['count']) class TestSumProjectVolume(v2.FunctionalTest): PATH = '/meters/volume.size/statistics' def setUp(self): super(TestSumProjectVolume, self).setUp() for i in range(3): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id-%s' % i, timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_no_time_bounds(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }]) expected = 5 + 6 + 7 self.assertEqual(expected, data[0]['sum']) self.assertEqual(3, data[0]['count']) def test_start_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, ]) expected = 6 + 7 self.assertEqual(expected, data[0]['sum']) self.assertEqual(2, data[0]['count']) def test_start_timestamp_after(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T12:34:00', }, ]) self.assertEqual([], data) def test_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:30:00', }, ]) self.assertEqual(5, data[0]['sum']) self.assertEqual(1, data[0]['count']) def test_end_timestamp_before(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T09:54:00', }, ]) self.assertEqual([], data) def test_start_end_timestamp(self): data = self.get_json(self.PATH, 
q=[{'field': 'project_id', 'value': 'project1', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:32:00', }, ]) self.assertEqual(6, data[0]['sum']) self.assertEqual(1, data[0]['count']) class TestSumResourceVolume(v2.FunctionalTest): PATH = '/meters/volume.size/statistics' def setUp(self): super(TestSumResourceVolume, self).setUp() for i in range(3): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_no_time_bounds(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }]) self.assertEqual(5 + 6 + 7, data[0]['sum']) self.assertEqual(3, data[0]['count']) def test_no_time_bounds_with_period(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id'}], period=1800) self.assertEqual(3, len(data)) self.assertEqual(set([u'2012-09-25T10:30:00', u'2012-09-25T12:32:00', u'2012-09-25T11:31:00']), set(x['duration_start'] for x in data)) self.assertEqual(1800, data[0]['period']) self.assertEqual(set([u'2012-09-25T10:30:00', u'2012-09-25T11:30:00', u'2012-09-25T12:30:00']), set(x['period_start'] for x in data)) def test_start_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }]) self.assertEqual(6 + 7, data[0]['sum']) self.assertEqual(2, data[0]['count']) def test_start_timestamp_with_period(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id'}, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T10:15:00'}], period=7200) self.assertEqual(2, len(data)) self.assertEqual(set([u'2012-09-25T10:30:00', u'2012-09-25T12:32:00']), set(x['duration_start'] for x in data)) self.assertEqual(7200, data[0]['period']) self.assertEqual(set([u'2012-09-25T10:15:00', u'2012-09-25T12:15:00']), set(x['period_start'] for x in data)) def test_start_timestamp_after(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T12:34:00', }]) self.assertEqual([], data) def test_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T11:30:00', }]) self.assertEqual(5, data[0]['sum']) self.assertEqual(1, data[0]['count']) def test_end_timestamp_before(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'le', 'value': '2012-09-25T09:54:00', }]) self.assertEqual([], data) def test_start_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'resource_id', 'value': 'resource-id', }, {'field': 'timestamp', 'op': 'ge', 'value': '2012-09-25T11:30:00', }, {'field': 'timestamp', 'op': 'lt', 'value': '2012-09-25T11:32:00', }]) self.assertEqual(6, data[0]['sum']) self.assertEqual(1, data[0]['count']) class TestGroupByInstance(v2.FunctionalTest): PATH = '/meters/instance/statistics' def setUp(self): super(TestGroupByInstance, self).setUp() test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 
'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source-2'}, {'volume': 2, 'user': 'user-1', 'project': 'project-2', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source-2'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 2, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 4, 'user': 'user-3', 'project': 'project-1', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-3'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_CUMULATIVE, unit='s', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], }, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_group_by_user(self): data = self.get_json(self.PATH, groupby=['user_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['user_id']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'user_id': 'user-1'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'user_id': 'user-2'}: self.assertEqual(4, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(8, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'user_id': 'user-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) def test_group_by_resource(self): data = self.get_json(self.PATH, groupby=['resource_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'resource_id': 'resource-1'}: self.assertEqual(3, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(6, 
r['sum']) self.assertEqual(2, r['avg']) elif grp == {'resource_id': 'resource-2'}: self.assertEqual(3, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'resource_id': 'resource-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) def test_group_by_project(self): data = self.get_json(self.PATH, groupby=['project_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'project_id': 'project-1'}: self.assertEqual(5, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(10, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'project_id': 'project-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(3, r['avg']) def test_group_by_unknown_field(self): response = self.get_json(self.PATH, expect_errors=True, groupby=['wtf']) self.assertEqual(400, response.status_code) def test_group_by_multiple_regular(self): data = self.get_json(self.PATH, groupby=['user_id', 'resource_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'user_id': 'user-1', 'resource_id': 'resource-1'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'user_id': 'user-2', 'resource_id': 'resource-1'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'user_id': 'user-2', 'resource_id': 'resource-2'}: self.assertEqual(3, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'user_id': 'user-3', 'resource_id': 'resource-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) else: self.assertNotEqual(grp, {'user_id': 'user-1', 'resource_id': 'resource-2'}) self.assertNotEqual(grp, {'user_id': 'user-1', 'resource_id': 'resource-3'}) self.assertNotEqual(grp, {'user_id': 'user-2', 'resource_id': 'resource-3'}) self.assertNotEqual(grp, {'user_id': 'user-3', 'resource_id': 'resource-1'}) self.assertNotEqual(grp, {'user_id': 'user-3', 'resource_id': 'resource-2'}) def test_group_by_with_query_filter(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'op': 'eq', 'value': 'project-1'}], 
groupby=['resource_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'resource_id': 'resource-1'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'resource_id': 'resource-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(1, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(1, r['avg']) elif grp == {'resource_id': 'resource-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) def test_group_by_with_query_filter_multiple(self): data = self.get_json(self.PATH, q=[{'field': 'user_id', 'op': 'eq', 'value': 'user-2'}, {'field': 'source', 'op': 'eq', 'value': 'source-1'}], groupby=['project_id', 'resource_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2', 'resource-1', 'resource-2']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'project_id': 'project-1', 'resource_id': 'resource-1'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'project_id': 'project-1', 'resource_id': 'resource-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(1, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(1, r['avg']) elif grp == {'project_id': 'project-2', 'resource_id': 'resource-2'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) else: self.assertNotEqual(grp, {'project_id': 'project-2', 'resource_id': 'resource-1'}) def test_group_by_with_period(self): data = self.get_json(self.PATH, groupby=['project_id'], period=7200) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set(sub_dict['period_start'] for sub_dict in data) period_start_valid = set([u'2013-08-01T10:11:00', u'2013-08-01T14:11:00', u'2013-08-01T16:11:00']) self.assertEqual(period_start_valid, period_start_set) for r in data: grp = r['groupby'] period_start = r['period_start'] if (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T10:11:00'): self.assertEqual(3, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(4260, r['duration']) self.assertEqual(u'2013-08-01T10:11:00', 
r['duration_start']) self.assertEqual(u'2013-08-01T11:22:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) elif (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T14:11:00'): self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(4260, r['duration']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) elif (grp == {'project_id': 'project-2'} and period_start == u'2013-08-01T14:11:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) elif (grp == {'project_id': 'project-2'} and period_start == u'2013-08-01T16:11:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) else: self.assertNotEqual([grp, period_start], [{'project_id': 'project-1'}, u'2013-08-01T16:11:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T10:11:00']) def test_group_by_with_query_filter_and_period(self): data = self.get_json(self.PATH, q=[{'field': 'source', 'op': 'eq', 'value': 'source-1'}], groupby=['project_id'], period=7200) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set(sub_dict['period_start'] for sub_dict in data) period_start_valid = set([u'2013-08-01T10:11:00', u'2013-08-01T14:11:00', u'2013-08-01T16:11:00']) self.assertEqual(period_start_valid, period_start_set) for r in data: grp = r['groupby'] period_start = r['period_start'] if (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T10:11:00'): self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(1, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(1, r['avg']) self.assertEqual(1740, r['duration']) self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) elif (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T14:11:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) 
self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) elif (grp == {'project_id': 'project-2'} and period_start == u'2013-08-01T16:11:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) else: self.assertNotEqual([grp, period_start], [{'project_id': 'project-1'}, u'2013-08-01T16:11:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T10:11:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T14:11:00']) def test_group_by_start_timestamp_after(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T17:28:01'}], groupby=['project_id']) self.assertEqual([], data) def test_group_by_end_timestamp_before(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T10:10:59'}], groupby=['project_id']) self.assertEqual([], data) def test_group_by_start_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T14:58:00'}], groupby=['project_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'project_id': 'project-1'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'project_id': 'project-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(3, r['avg']) def test_group_by_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T11:45:00'}], groupby=['project_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'project_id': 'project-1'}: self.assertEqual(3, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(2, r['avg']) def test_group_by_start_end_timestamp(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T08:17:03'}, {'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T23:59:59'}], groupby=['project_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) 
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'project_id': 'project-1'}: self.assertEqual(5, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(10, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'project_id': 'project-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(4, r['max']) self.assertEqual(6, r['sum']) self.assertEqual(3, r['avg']) def test_group_by_start_end_timestamp_with_query_filter(self): data = self.get_json(self.PATH, q=[{'field': 'project_id', 'op': 'eq', 'value': 'project-1'}, {'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T11:01:00'}, {'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T20:00:00'}], groupby=['resource_id']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'resource_id': 'resource-1'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'resource_id': 'resource-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) def test_group_by_start_end_timestamp_with_period(self): data = self.get_json(self.PATH, q=[{'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T14:00:00'}, {'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T17:00:00'}], groupby=['project_id'], period=3600) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set(sub_dict['period_start'] for sub_dict in data) period_start_valid = set([u'2013-08-01T14:00:00', u'2013-08-01T15:00:00', u'2013-08-01T16:00:00']) self.assertEqual(period_start_valid, period_start_set) for r in data: grp = r['groupby'] period_start = r['period_start'] if (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T14:00:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) self.assertEqual(3600, r['period']) self.assertEqual(u'2013-08-01T15:00:00', r['period_end']) elif (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T16:00:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T16:10:00', r['duration_start']) self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) self.assertEqual(3600, r['period']) self.assertEqual(u'2013-08-01T17:00:00', r['period_end']) elif 
(grp == {'project_id': 'project-2'} and period_start == u'2013-08-01T15:00:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) self.assertEqual(3600, r['period']) self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) else: self.assertNotEqual([grp, period_start], [{'project_id': 'project-1'}, u'2013-08-01T15:00:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T14:00:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T16:00:00']) def test_group_by_start_end_timestamp_with_query_filter_and_period(self): data = self.get_json(self.PATH, q=[{'field': 'source', 'op': 'eq', 'value': 'source-1'}, {'field': 'timestamp', 'op': 'ge', 'value': '2013-08-01T10:00:00'}, {'field': 'timestamp', 'op': 'le', 'value': '2013-08-01T18:00:00'}], groupby=['project_id'], period=7200) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set(sub_dict['period_start'] for sub_dict in data) period_start_valid = set([u'2013-08-01T10:00:00', u'2013-08-01T14:00:00', u'2013-08-01T16:00:00']) self.assertEqual(period_start_valid, period_start_set) for r in data: grp = r['groupby'] period_start = r['period_start'] if (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T10:00:00'): self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(1, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(1, r['avg']) self.assertEqual(1740, r['duration']) self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T12:00:00', r['period_end']) elif (grp == {'project_id': 'project-1'} and period_start == u'2013-08-01T14:00:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(2, r['sum']) self.assertEqual(2, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) elif (grp == {'project_id': 'project-2'} and period_start == u'2013-08-01T16:00:00'): self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) self.assertEqual(0, r['duration']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) self.assertEqual(7200, r['period']) self.assertEqual(u'2013-08-01T18:00:00', r['period_end']) else: self.assertNotEqual([grp, period_start], [{'project_id': 'project-1'}, u'2013-08-01T16:00:00']) self.assertNotEqual([grp, period_start], [{'project_id': 'project-2'}, u'2013-08-01T10:00:00']) self.assertNotEqual([grp, period_start], [{'project_id': 
'project-2'}, u'2013-08-01T14:00:00']) @tests_db.run_with('mongodb', 'hbase', 'db2') class TestGroupBySource(v2.FunctionalTest): # FIXME(terriyu): We have to put test_group_by_source in its own class # because SQLAlchemy currently doesn't support group by source statistics. # When group by source is supported in SQLAlchemy, this test should be # moved to TestGroupByInstance with all the other group by statistics # tests. PATH = '/meters/instance/statistics' def setUp(self): super(TestGroupBySource, self).setUp() test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source-2'}, {'volume': 2, 'user': 'user-1', 'project': 'project-2', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source-2'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 2, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1'}, {'volume': 4, 'user': 'user-3', 'project': 'project-1', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-3'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_CUMULATIVE, unit='s', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], }, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def tearDown(self): self.conn.clear() super(TestGroupBySource, self).tearDown() def test_group_by_source(self): data = self.get_json(self.PATH, groupby=['source']) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['source']), groupby_keys_set) self.assertEqual(set(['source-1', 'source-2', 'source-3']), groupby_vals_set) for r in data: grp = r['groupby'] if grp == {'source': 'source-1'}: self.assertEqual(4, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(1, r['min']) self.assertEqual(4, r['max']) self.assertEqual(8, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'source': 'source-2'}: self.assertEqual(2, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(2, r['min']) self.assertEqual(2, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(2, r['avg']) elif grp == {'source': 'source-3'}: self.assertEqual(1, r['count']) self.assertEqual('s', r['unit']) self.assertEqual(4, r['min']) 
self.assertEqual(4, r['max']) self.assertEqual(4, r['sum']) self.assertEqual(4, r['avg']) class TestSelectableAggregates(v2.FunctionalTest): PATH = '/meters/instance/statistics' def setUp(self): super(TestSelectableAggregates, self).setUp() test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source'}, {'volume': 2, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source'}, {'volume': 1, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 2, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 5, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 9, 'user': 'user-3', 'project': 'project-3', 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', 'source': 'source'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_GAUGE, unit='instance', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], }, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def _do_test_per_tenant_selectable_standard_aggregate(self, aggregate, expected_values): agg_args = {'aggregate.func': aggregate} data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) projects = ['project-1', 'project-2', 'project-3'] self.assertEqual(set(projects), groupby_vals_set) standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) for r in data: grp = r['groupby'] for project in projects: if grp == {'project_id': project}: expected = expected_values[projects.index(project)] self.assertEqual('instance', r['unit']) self.assertAlmostEqual(r[aggregate], expected) self.assertIn('aggregate', r) self.assertIn(aggregate, r['aggregate']) self.assertAlmostEqual(r['aggregate'][aggregate], expected) for a in standard_aggregates - set([aggregate]): self.assertNotIn(a, r) def test_per_tenant_selectable_max(self): self._do_test_per_tenant_selectable_standard_aggregate('max', [5, 4, 9]) def 
test_per_tenant_selectable_min(self): self._do_test_per_tenant_selectable_standard_aggregate('min', [2, 1, 9]) def test_per_tenant_selectable_sum(self): self._do_test_per_tenant_selectable_standard_aggregate('sum', [9, 9, 9]) def test_per_tenant_selectable_avg(self): self._do_test_per_tenant_selectable_standard_aggregate('avg', [3, 2.25, 9]) def test_per_tenant_selectable_count(self): self._do_test_per_tenant_selectable_standard_aggregate('count', [3, 4, 1]) def test_per_tenant_selectable_parameterized_aggregate(self): agg_args = {'aggregate.func': 'cardinality', 'aggregate.param': 'resource_id'} data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) projects = ['project-1', 'project-2', 'project-3'] self.assertEqual(set(projects), groupby_vals_set) aggregate = 'cardinality/resource_id' expected_values = [2.0, 3.0, 1.0] standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) for r in data: grp = r['groupby'] for project in projects: if grp == {'project_id': project}: expected = expected_values[projects.index(project)] self.assertEqual('instance', r['unit']) self.assertNotIn(aggregate, r) self.assertIn('aggregate', r) self.assertIn(aggregate, r['aggregate']) self.assertEqual(expected, r['aggregate'][aggregate]) for a in standard_aggregates: self.assertNotIn(a, r) def test_large_quantum_selectable_parameterized_aggregate(self): # add a large number of datapoints that should not impact the # cardinality, to check that the computation logic is tolerant of # different DB behavior on larger numbers of samples per period for i in range(200): s = sample.Sample( 'instance', sample.TYPE_GAUGE, unit='instance', volume=i * 1.0, user_id='user-1', project_id='project-1', resource_id='resource-1', timestamp=datetime.datetime(2013, 8, 1, 11, i % 60), resource_metadata={'flavor': 'm1.tiny', 'event': 'event-1', }, source='source', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) agg_args = {'aggregate.func': 'cardinality', 'aggregate.param': 'resource_id'} data = self.get_json(self.PATH, **agg_args) aggregate = 'cardinality/resource_id' expected_value = 5.0 standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) r = data[0] self.assertNotIn(aggregate, r) self.assertIn('aggregate', r) self.assertIn(aggregate, r['aggregate']) self.assertEqual(expected_value, r['aggregate'][aggregate]) for a in standard_aggregates: self.assertNotIn(a, r) def test_repeated_unparameterized_aggregate(self): agg_params = 'aggregate.func=count&aggregate.func=count' data = self.get_json(self.PATH, override_params=agg_params) aggregate = 'count' expected_value = 8.0 standard_aggregates = set(['min', 'max', 'sum', 'avg']) r = data[0] self.assertIn(aggregate, r) self.assertEqual(expected_value, r[aggregate]) self.assertIn('aggregate', r) self.assertIn(aggregate, r['aggregate']) self.assertEqual(expected_value, r['aggregate'][aggregate]) for a in standard_aggregates: self.assertNotIn(a, r) def test_fully_repeated_parameterized_aggregate(self): agg_params = ('aggregate.func=cardinality&' 'aggregate.param=resource_id&' 'aggregate.func=cardinality&' 'aggregate.param=resource_id&') data = self.get_json(self.PATH, override_params=agg_params) aggregate = 'cardinality/resource_id' expected_value = 5.0 standard_aggregates = set(['count', 
'min', 'max', 'sum', 'avg']) r = data[0] self.assertIn('aggregate', r) self.assertNotIn(aggregate, r) self.assertIn(aggregate, r['aggregate']) self.assertEqual(expected_value, r['aggregate'][aggregate]) for a in standard_aggregates: self.assertNotIn(a, r) def test_partially_repeated_parameterized_aggregate(self): agg_params = ('aggregate.func=cardinality&' 'aggregate.param=resource_id&' 'aggregate.func=cardinality&' 'aggregate.param=project_id&') data = self.get_json(self.PATH, override_params=agg_params) expected_values = {'cardinality/resource_id': 5.0, 'cardinality/project_id': 3.0} standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) r = data[0] self.assertIn('aggregate', r) for aggregate in expected_values.keys(): self.assertNotIn(aggregate, r) self.assertIn(aggregate, r['aggregate']) self.assertEqual(expected_values[aggregate], r['aggregate'][aggregate]) for a in standard_aggregates: self.assertNotIn(a, r) def test_bad_selectable_parameterized_aggregate(self): agg_args = {'aggregate.func': 'cardinality', 'aggregate.param': 'injection_attack'} resp = self.get_json(self.PATH, status=[400], groupby=['project_id'], **agg_args) self.assertIn('error_message', resp) self.assertEqual(resp['error_message'].get('faultcode'), 'Client') self.assertEqual(resp['error_message'].get('faultstring'), 'Bad aggregate: cardinality.injection_attack') @tests_db.run_with('mongodb', 'hbase', 'db2') class TestUnparameterizedAggregates(v2.FunctionalTest): # We put the stddev test case in a separate class so that we # can easily exclude the sqlalchemy scenario, as sqlite doesn't # support the stddev_pop function and fails ungracefully with # OperationalError when it is used. However we still want to # test the corresponding functionality in the mongo driver. # For hbase & db2, the skip on NotImplementedError logic works # in the usual way. 
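    # A minimal sketch of the request/response shape exercised here,
    # reconstructed from the assertions below (illustrative only, not an
    # additional test; the URL prefix is whatever the functional test app
    # serves):
    #
    #     GET <prefix>/meters/instance/statistics
    #         ?groupby=project_id&aggregate.func=stddev
    #
    # is expected to answer, per project group, something like
    #
    #     {"groupby": {"project_id": "project-1"},
    #      "unit": "instance",
    #      "aggregate": {"stddev": 1.4142}}
    #
    # with none of the standard aggregates (count/min/max/sum/avg) present
    # at the top level of the result.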
PATH = '/meters/instance/statistics' def setUp(self): super(TestUnparameterizedAggregates, self).setUp() test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source'}, {'volume': 2, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source'}, {'volume': 1, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 2, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 5, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source'}, {'volume': 9, 'user': 'user-3', 'project': 'project-3', 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', 'source': 'source'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_GAUGE, unit='instance', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], }, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_per_tenant_selectable_unparameterized_aggregate(self): agg_args = {'aggregate.func': 'stddev'} data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) groupby_keys_set = set(x for sub_dict in data for x in sub_dict['groupby'].keys()) groupby_vals_set = set(x for sub_dict in data for x in sub_dict['groupby'].values()) self.assertEqual(set(['project_id']), groupby_keys_set) projects = ['project-1', 'project-2', 'project-3'] self.assertEqual(set(projects), groupby_vals_set) aggregate = 'stddev' expected_values = [1.4142, 1.0897, 0.0] standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) for r in data: grp = r['groupby'] for project in projects: if grp == {'project_id': project}: expected = expected_values[projects.index(project)] self.assertEqual('instance', r['unit']) self.assertNotIn(aggregate, r) self.assertIn('aggregate', r) self.assertIn(aggregate, r['aggregate']) self.assertAlmostEqual(r['aggregate'][aggregate], expected, places=4) for a in standard_aggregates: self.assertNotIn(a, r) @tests_db.run_with('mongodb') class TestBigValueStatistics(v2.FunctionalTest): PATH = '/meters/volume.size/statistics' def setUp(self): super(TestBigValueStatistics, self).setUp() for i in range(0, 3): s = sample.Sample( 'volume.size', 'gauge', 'GiB', (i + 1) * 
(10 ** 12), 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_big_value_statistics(self): data = self.get_json(self.PATH) expected_values = {'count': 3, 'min': 10 ** 12, 'max': 3 * 10 ** 12, 'sum': 6 * 10 ** 12, 'avg': 2 * 10 ** 12} self.assertEqual(1, len(data)) for d in data: for name, expected_value in expected_values.items(): self.assertIn(name, d) self.assertEqual(expected_value, d[name]) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py0000664000567000056710000003740713072744703031762 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing raw events. """ import copy import datetime import os import mock from oslo_utils import fileutils from oslo_utils import timeutils from oslotest import mockpatch import six from ceilometer.tests.functional.api import v2 class TestPostSamples(v2.FunctionalTest): def fake_notifier_sample(self, ctxt, event_type, payload): samples = payload['samples'] for m in samples: del m['message_signature'] self.published.append(samples) def _make_app(self, enable_acl=False): content = ('{"context_is_project": "project_id:%(project_id)s",' '"default" : "!",' '"telemetry:create_samples": ""}') if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') self.CONF.set_override("policy_file", self.tempfile, group='oslo_policy') return super(TestPostSamples, self)._make_app() def tearDown(self): os.remove(self.tempfile) super(TestPostSamples, self).tearDown() def setUp(self): self.published = [] notifier = mock.Mock() notifier.sample.side_effect = self.fake_notifier_sample self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', return_value=notifier)) super(TestPostSamples, self).setUp() def test_one(self): s1 = [{'counter_name': 'apples', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/apples/', s1) # timestamp not given so it is generated. s1[0]['timestamp'] = data.json[0]['timestamp'] # Ignore message id that is randomly generated s1[0]['message_id'] = data.json[0]['message_id'] # source is generated if not provided. 
s1[0]['source'] = '%s:openstack' % s1[0]['project_id'] self.assertEqual(s1, data.json) self.assertEqual(s1[0], self.published[0][0]) def test_nested_metadata(self): s1 = [{'counter_name': 'apples', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'nest.name1': 'value1', 'name2': 'value2', 'nest.name2': 'value3'}}] data = self.post_json('/meters/apples/', s1) # timestamp not given so it is generated. s1[0]['timestamp'] = data.json[0]['timestamp'] # Ignore message id that is randomly generated s1[0]['message_id'] = data.json[0]['message_id'] # source is generated if not provided. s1[0]['source'] = '%s:openstack' % s1[0]['project_id'] unwound = copy.copy(s1[0]) unwound['resource_metadata'] = {'nest': {'name1': 'value1', 'name2': 'value3'}, 'name2': 'value2'} # only the published sample should be unwound, not the representation # in the API response self.assertEqual(s1[0], data.json[0]) self.assertEqual(unwound, self.published[0][0]) def test_invalid_counter_type(self): s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'INVALID_TYPE', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'closedstack', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/my_counter_name/', s1, expect_errors=True) self.assertEqual(400, data.status_int) self.assertEqual(0, len(self.published)) def test_message_id_provided(self): """Do not accept a sample with a message_id.""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'message_id': 'evil', 'source': 'closedstack', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/my_counter_name/', s1, expect_errors=True) self.assertEqual(400, data.status_int) self.assertEqual(0, len(self.published)) def test_wrong_project_id(self): """Do not accept cross-posting samples to different projects.""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'closedstack', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/my_counter_name/', s1, expect_errors=True, headers={ "X-Roles": "Member", "X-Tenant-Name": "lu-tenant", "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", }) self.assertEqual(400, data.status_int) self.assertEqual(0, len(self.published)) def test_multiple_samples(self): """Send multiple samples. The use case here is to reduce the chatter by sending the counters at a slower cadence. 
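        A sketch of the batch this builds (assumed shape, mirroring the
        loop in the body): one POST to /meters/apples whose JSON body is a
        list of six samples with hourly timestamps, e.g.

            [{"counter_name": "apples", "counter_volume": 0.0,
              "timestamp": "2012-08-27T00:00:00", ...},
             {"counter_name": "apples", "counter_volume": 3.0,
              "timestamp": "2012-08-27T01:00:00", ...},
             ...]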
""" samples = [] for x in range(6): dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None) s = {'counter_name': 'apples', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': float(x * 3), 'source': 'evil', 'timestamp': dt.isoformat(), 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': str(x), 'name2': str(x + 4)}} samples.append(s) data = self.post_json('/meters/apples/', samples) for x, s in enumerate(samples): # source is modified to include the project_id. s['source'] = '%s:%s' % (s['project_id'], s['source']) # Ignore message id that is randomly generated s['message_id'] = data.json[x]['message_id'] # remove tzinfo to compare generated timestamp # with the provided one c = data.json[x] timestamp = timeutils.parse_isotime(c['timestamp']) c['timestamp'] = timestamp.replace(tzinfo=None).isoformat() # do the same on the pipeline msg = self.published[0][x] timestamp = timeutils.parse_isotime(msg['timestamp']) msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat() self.assertEqual(s, c) self.assertEqual(s, self.published[0][x]) def test_missing_mandatory_fields(self): """Do not accept posting samples with missing mandatory fields.""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'closedstack', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] # one by one try posting without a mandatory field. for m in ['counter_volume', 'counter_unit', 'counter_type', 'resource_id', 'counter_name']: s_broke = copy.copy(s1) del s_broke[0][m] print('posting without %s' % m) data = self.post_json('/meters/my_counter_name', s_broke, expect_errors=True) self.assertEqual(400, data.status_int) def test_multiple_project_id_and_admin(self): """Allow admin is allowed to set multiple project_id.""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'closedstack', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', }, {'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 2, 'source': 'closedstack', 'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/my_counter_name/', s1, headers={"X-Roles": "admin"}) self.assertEqual(201, data.status_int) for x, s in enumerate(s1): # source is modified to include the project_id. s['source'] = '%s:%s' % (s['project_id'], 'closedstack') # Ignore message id that is randomly generated s['message_id'] = data.json[x]['message_id'] # timestamp not given so it is generated. s['timestamp'] = data.json[x]['timestamp'] s.setdefault('resource_metadata', dict()) self.assertEqual(s, data.json[x]) self.assertEqual(s, self.published[0][x]) def test_multiple_samples_multiple_sources(self): """Test posting with special conditions. Do accept a single post with some multiples sources with some of them null. 
""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'paperstack', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', }, {'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 5, 'source': 'waterstack', 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', }, {'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 2, 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] data = self.post_json('/meters/my_counter_name/', s1, expect_errors=True) self.assertEqual(201, data.status_int) for x, s in enumerate(s1): # source is modified to include the project_id. s['source'] = '%s:%s' % ( s['project_id'], s.get('source', self.CONF.sample_source) ) # Ignore message id that is randomly generated s['message_id'] = data.json[x]['message_id'] # timestamp not given so it is generated. s['timestamp'] = data.json[x]['timestamp'] s.setdefault('resource_metadata', dict()) self.assertEqual(s, data.json[x]) self.assertEqual(s, self.published[0][x]) def test_missing_project_user_id(self): """Ensure missing project & user IDs are defaulted appropriately.""" s1 = [{'counter_name': 'my_counter_name', 'counter_type': 'gauge', 'counter_unit': 'instance', 'counter_volume': 1, 'source': 'closedstack', 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'resource_metadata': {'name1': 'value1', 'name2': 'value2'}}] project_id = 'bc23a9d531064583ace8f67dad60f6bb' user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff' data = self.post_json('/meters/my_counter_name/', s1, expect_errors=True, headers={ 'X-Roles': 'chief-bottle-washer', 'X-Project-Id': project_id, 'X-User-Id': user_id, }) self.assertEqual(201, data.status_int) for x, s in enumerate(s1): # source is modified to include the project_id. s['source'] = '%s:%s' % (project_id, s['source']) # Ignore message id that is randomly generated s['message_id'] = data.json[x]['message_id'] # timestamp not given so it is generated. s['timestamp'] = data.json[x]['timestamp'] s['user_id'] = user_id s['project_id'] = project_id self.assertEqual(s, data.json[x]) self.assertEqual(s, self.published[0][x]) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_api_upgrade.py0000664000567000056710000001445613072744706027465 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import fileutils from oslotest import mockpatch import six from ceilometer.tests.functional.api import v2 class TestAPIUpgradePath(v2.FunctionalTest): def _make_app(self): content = ('{"default": ""}') if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') self.CONF.set_override("policy_file", self.tempfile, group='oslo_policy') return super(TestAPIUpgradePath, self)._make_app() def _setup_osloconfig_options(self): self.CONF.set_override('gnocchi_is_enabled', True, group='api') self.CONF.set_override('aodh_is_enabled', True, group='api') self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', group='api') def _setup_keystone_mock(self): self.CONF.set_override('gnocchi_is_enabled', None, group='api') self.CONF.set_override('aodh_is_enabled', None, group='api') self.CONF.set_override('aodh_url', None, group='api') self.CONF.set_override('meter_dispatchers', ['database']) self.ks = mock.Mock() self.catalog = (self.ks.session.auth.get_access. return_value.service_catalog) self.catalog.url_for.side_effect = self._url_for self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=self.ks)) @staticmethod def _url_for(service_type=None): if service_type == 'metric': return 'http://gnocchi/' elif service_type == 'alarming': return 'http://alarm-endpoint:8008/' def _do_test_gnocchi_enabled_without_database_backend(self): self.CONF.set_override('meter_dispatchers', 'gnocchi') for endpoint in ['meters', 'samples', 'resources']: response = self.app.get(self.PATH_PREFIX + '/' + endpoint, status=410) self.assertIn(b'Gnocchi API', response.body) headers_events = {"X-Roles": "admin", "X-User-Id": "user1", "X-Project-Id": "project1"} for endpoint in ['events', 'event_types']: self.app.get(self.PATH_PREFIX + '/' + endpoint, headers=headers_events, status=200) response = self.post_json('/query/samples', params={ "filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3 }, status=410) self.assertIn(b'Gnocchi API', response.body) sample_params = { "counter_type": "gauge", "counter_name": "fake_counter", "resource_id": "fake_resource_id", "counter_unit": "fake_unit", "counter_volume": "1" } self.post_json('/meters/fake_counter', params=[sample_params], status=201) response = self.post_json('/meters/fake_counter?direct=1', params=[sample_params], status=400) self.assertIn(b'direct option cannot be true when Gnocchi is enabled', response.body) def _do_test_alarm_redirect(self): response = self.app.get(self.PATH_PREFIX + '/alarms', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms", response.headers['Location']) response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", response.headers['Location']) response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", response.headers['Location']) response = self.post_json('/query/alarms', params={ "filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3 }, status=307) self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", response.headers['Location']) def test_gnocchi_enabled_without_database_backend_keystone(self): 
self._setup_keystone_mock() self._do_test_gnocchi_enabled_without_database_backend() self.catalog.url_for.assert_has_calls([ mock.call(service_type="alarming"), mock.call(service_type="metric")], any_order=True) def test_gnocchi_enabled_without_database_backend_configoptions(self): self._setup_osloconfig_options() self._do_test_gnocchi_enabled_without_database_backend() def test_alarm_redirect_keystone(self): self._setup_keystone_mock() self._do_test_alarm_redirect() self.assertEqual([mock.call(service_type="alarming")], self.catalog.url_for.mock_calls) def test_alarm_redirect_configoptions(self): self._setup_osloconfig_options() self._do_test_alarm_redirect() ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_app.py0000664000567000056710000001057713072744703025762 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test basic ceilometer-api app """ from ceilometer.tests.functional.api import v2 class TestPecanApp(v2.FunctionalTest): def test_pecan_extension_guessing_unset(self): # check Pecan does not assume .jpg is an extension response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') self.assertEqual('application/json', response.content_type) class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' en_US_translated_error = 'en-US translated error' def _fake_translate(self, message, user_locale): if user_locale is None: return self.no_lang_translated_error else: return self.en_US_translated_error def test_json_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml;q=0.8, \ application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "text/html,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) def test_xml_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml,*/*"} ) 
self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json;q=0.8 \ ,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py0000664000567000056710000003134513072744706032143 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests complex queries for samples """ import datetime from oslo_utils import timeutils from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 as tests_api admin_header = {"X-Roles": "admin", "X-Project-Id": "project-id1"} non_admin_header = {"X-Roles": "Member", "X-Project-Id": "project-id1"} class TestQueryMetersController(tests_api.FunctionalTest): def setUp(self): super(TestQueryMetersController, self).setUp() self.url = '/query/samples' for cnt in [ sample.Sample('meter.test', 'cumulative', '', 1, 'user-id1', 'project-id1', 'resource-id1', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server1', 'tag': 'self.sample', 'size': 456, 'util': 0.25, 'is_public': True}, source='test_source'), sample.Sample('meter.test', 'cumulative', '', 2, 'user-id2', 'project-id2', 'resource-id2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server2', 'tag': 'self.sample', 'size': 123, 'util': 0.75, 'is_public': True}, source='test_source'), sample.Sample('meter.test', 'cumulative', '', 3, 'user-id3', 'project-id3', 'resource-id3', timestamp=datetime.datetime(2012, 7, 2, 10, 42), resource_metadata={'display_name': 'test-server3', 'tag': 'self.sample', 'size': 789, 'util': 0.95, 'is_public': True}, source='test_source')]: msg = utils.meter_message_from_counter( cnt, self.CONF.publisher.telemetry_secret) self.conn.record_metering_data(msg) def test_query_fields_are_optional(self): data = self.post_json(self.url, params={}) self.assertEqual(3, len(data.json)) def test_query_with_isotime(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() data = self.post_json(self.url, params={"filter": '{">=": {"timestamp": "' + isotime + '"}}'}) self.assertEqual(2, len(data.json)) for sample_item in data.json: result_time = timeutils.parse_isotime(sample_item['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertTrue(result_time >= date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.url, params={}, headers=non_admin_header) for sample_item in data.json: self.assertEqual("project-id1", sample_item['project_id']) def 
test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id1"}}'}, headers=non_admin_header) for sample_item in data.json: self.assertEqual("project-id1", sample_item['project_id']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.url, params={}, headers=admin_header) self.assertEqual(3, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2", "project-id3"])) def test_admin_tenant_sees_every_project_with_complex_filter(self): filter = ('{"OR": ' + '[{"=": {"project_id": "project-id1"}}, ' + '{"=": {"project_id": "project-id2"}}]}') data = self.post_json(self.url, params={"filter": filter}, headers=admin_header) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_sees_every_project_with_in_filter(self): filter = ('{"In": ' + '{"project_id": ["project-id1", "project-id2"]}}') data = self.post_json(self.url, params={"filter": filter}, headers=admin_header) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_can_query_any_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, headers=admin_header) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], set(["project-id2"])) def test_query_with_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": "DESC"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["project-id3", "project-id2", "project-id1"], [s["project_id"] for s in data.json]) def test_query_with_field_name_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project": "project-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], set(["project-id2"])) def test_query_with_field_name_resource(self): data = self.post_json(self.url, params={"filter": '{"=": {"resource": "resource-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['resource_id'], set(["resource-id2"])) def test_query_with_wrong_field_name(self): data = self.post_json(self.url, params={"filter": '{"=": {"unknown": "resource-id2"}}'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"is not valid under any of the given schemas", data.body) def test_query_with_wrong_json(self): data = self.post_json(self.url, params={"filter": '{"=": "resource": "resource-id2"}}'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Filter expression not valid", data.body) def test_query_with_field_name_user(self): data = self.post_json(self.url, params={"filter": '{"=": {"user": "user-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['user_id'], set(["user-id2"])) def test_query_with_field_name_meter(self): data = self.post_json(self.url, params={"filter": '{"=": {"meter": 
"meter.test"}}'}) self.assertEqual(3, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['meter'], set(["meter.test"])) def test_query_with_lower_and_upper_case_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": "DeSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["project-id3", "project-id2", "project-id1"], [s["project_id"] for s in data.json]) def test_query_with_user_field_name_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"user": "aSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["user-id1", "user-id2", "user-id3"], [s["user_id"] for s in data.json]) def test_query_with_volume_field_name_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"volume": "deSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual([3, 2, 1], [s["volume"] for s in data.json]) def test_query_with_missing_order_in_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": ""}]'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body) def test_query_with_wrong_json_in_orderby(self): data = self.post_json(self.url, params={"orderby": '{"project_id": "desc"}]'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Order-by expression not valid: Extra data", data.body) def test_filter_with_metadata(self): data = self.post_json(self.url, params={"filter": '{">=": {"metadata.util": 0.5}}'}) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5) def test_filter_with_negation(self): filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}' data = self.post_json(self.url, params={"filter": filter_expr}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5) def test_limit_must_be_positive(self): data = self.post_json(self.url, params={"limit": 0}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit must be positive", data.body) def test_default_limit(self): self.CONF.set_override('default_api_return_limit', 1, group='api') data = self.post_json(self.url, params={}) self.assertEqual(1, len(data.json)) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_event_scenarios.py0000664000567000056710000007015213072744706030367 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test event, event_type and trait retrieval.""" import datetime import uuid import webtest.app from ceilometer.event.storage import models from ceilometer.tests import db as tests_db from ceilometer.tests.functional.api import v2 USER_ID = uuid.uuid4().hex PROJ_ID = uuid.uuid4().hex HEADERS = {"X-Roles": "admin", "X-User-Id": USER_ID, "X-Project-Id": PROJ_ID} class EventTestBase(v2.FunctionalTest): def setUp(self): super(EventTestBase, self).setUp() self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for event_type in ['Foo', 'Bar', 'Zoo']: trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] # Message ID for test will be 'base'. So, message ID for the first # event will be '0', the second '100', and so on. # trait_time in first event will be equal to self.trait_time # (datetime.datetime(2013, 12, 31, 5, 0)), next will add 1 day, so # second will be (datetime.datetime(2014, 01, 01, 5, 0)) and so on. event_models.append( models.Event(message_id=str(base), event_type=event_type, generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) base += 100 self.trait_time += datetime.timedelta(days=1) self.event_conn.record_events(event_models) class TestEventTypeAPI(EventTestBase): PATH = '/event_types' def test_event_types(self): data = self.get_json(self.PATH, headers=HEADERS) for event_type in ['Foo', 'Bar', 'Zoo']: self.assertIn(event_type, data) class TestTraitAPI(EventTestBase): PATH = '/event_types/%s/traits' def test_get_traits_for_event(self): path = self.PATH % "Foo" data = self.get_json(path, headers=HEADERS) self.assertEqual(4, len(data)) def test_get_event_invalid_path(self): data = self.get_json('/event_types/trait_A/', headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_traits_for_non_existent_event(self): path = self.PATH % "NO_SUCH_EVENT_TYPE" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_event(self): path = (self.PATH % "Foo") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_A", data[0]['name']) path = (self.PATH % "Foo") + "/trait_B" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_B", data[0]['name']) self.assertEqual("1", data[0]['value']) path = (self.PATH % "Foo") + "/trait_D" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_D", data[0]['name']) self.assertEqual((self.trait_time - datetime.timedelta(days=3)). 
isoformat(), data[0]['value']) def test_get_trait_data_for_non_existent_event(self): path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_non_existent_trait(self): path = (self.PATH % "Foo") + "/no_such_trait" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) class TestEventAPI(EventTestBase): PATH = '/events' def test_get_events(self): data = self.get_json(self.PATH, headers=HEADERS) self.assertEqual(3, len(data)) # We expect to get native UTC generated time back trait_time = self.s_time for event in data: expected_generated = trait_time.isoformat() self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo']) self.assertEqual(4, len(event['traits'])) self.assertEqual({'status': {'nested': 'started'}}, event['raw']), self.assertEqual(expected_generated, event['generated']) for trait_name in ['trait_A', 'trait_B', 'trait_C', 'trait_D']: self.assertIn(trait_name, map(lambda x: x['name'], event['traits'])) trait_time += datetime.timedelta(days=1) def test_get_event_by_message_id(self): event = self.get_json(self.PATH + "/100", headers=HEADERS) expected_traits = [{'name': 'trait_A', 'type': 'string', 'value': 'my_Bar_text'}, {'name': 'trait_B', 'type': 'integer', 'value': '101'}, {'name': 'trait_C', 'type': 'float', 'value': '100.123456'}, {'name': 'trait_D', 'type': 'datetime', 'value': '2014-01-01T05:00:00'}] self.assertEqual('100', event['message_id']) self.assertEqual('Bar', event['event_type']) self.assertEqual('2014-01-01T05:00:00', event['generated']) self.assertEqual(expected_traits, event['traits']) def test_get_event_by_message_id_no_such_id(self): data = self.get_json(self.PATH + "/DNE", headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_events_filter_event_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'event_type', 'value': 'Foo'}]) self.assertEqual(1, len(data)) def test_get_events_filter_trait_no_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_empty_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': ''}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("The data type whats-up is not supported. The " "supported data type list is: [\'integer\', " "\'float\', \'string\', \'datetime\']", resp.json['error_message']['faultstring']) def test_get_events_filter_operator_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'op': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("Operator whats-up is not supported. 
The " "supported operators are: (\'lt\', \'le\', " "\'eq\', \'ne\', \'ge\', \'gt\')", resp.json['error_message']['faultstring']) def test_get_events_filter_text_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_int_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}]) self.assertEqual(1, len(data)) self.assertEqual('Bar', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B'] self.assertEqual(1, len(traits)) self.assertEqual('integer', traits[0]['type']) self.assertEqual('101', traits[0]['value']) def test_get_events_filter_float_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float'}]) self.assertEqual(1, len(data)) self.assertEqual('Zoo', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C'] self.assertEqual(1, len(traits)) self.assertEqual('float', traits[0]['type']) self.assertEqual('200.123456', traits[0]['value']) def test_get_events_filter_datetime_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime'}]) self.assertEqual(1, len(data)) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D'] self.assertEqual(1, len(traits)) self.assertEqual('datetime', traits[0]['type']) self.assertEqual('2014-01-01T05:00:00', traits[0]['value']) def test_get_events_multiple_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_multiple_filters_no_matches(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_multiple_filters_same_field_different_values(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}, {'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_not_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[]) self.assertEqual(3, len(data)) def test_get_events_filter_op_string(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'lt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 'string', 'op': 'le'}]) self.assertEqual(3, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'gt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 
'string', 'op': 'ge'}]) self.assertEqual(1, len(data)) def test_get_events_filter_op_integer(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_float(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_datatime(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-02T05:00:00', 'type': 'datetime', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-02T05:00:00', 'type': 'datetime', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_wrong_op(self): self.assertRaises(webtest.app.AppError, self.get_json, self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'el'}]) class AclRestrictedEventTestBase(v2.FunctionalTest): def setUp(self): super(AclRestrictedEventTestBase, self).setUp() self.admin_user_id = uuid.uuid4().hex self.admin_proj_id = uuid.uuid4().hex self.user_id = uuid.uuid4().hex self.proj_id = uuid.uuid4().hex self._generate_models() def 
_generate_models(self): event_models = [] self.s_time = datetime.datetime(2013, 12, 31, 5, 0) event_models.append( models.Event(message_id='1', event_type='empty_ev', generated=self.s_time, traits=[models.Trait('random', models.Trait.TEXT_TYPE, 'blah')], raw={})) event_models.append( models.Event(message_id='2', event_type='admin_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.admin_proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.admin_user_id)], raw={})) event_models.append( models.Event(message_id='3', event_type='user_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.user_id)], raw={})) self.event_conn.record_events(event_models) def test_non_admin_access(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(1, len(data)) self.assertEqual('user_ev', data[0]['event_type']) def test_non_admin_access_single(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/3', headers=a_headers) self.assertEqual('user_ev', data['event_type']) def test_non_admin_access_incorrect_user(self): a_headers = {"X-Roles": "member", "X-User-Id": 'blah', "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_incorrect_proj(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": 'blah'} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_single_invalid(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/1', headers=a_headers, expect_errors=True) self.assertEqual(404, data.status_int) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(2, len(data)) self.assertEqual(set(['empty_ev', 'admin_ev']), set(ev['event_type'] for ev in data)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_trait_filter(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'random', 'value': 'blah', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) self.assertEqual('empty_ev', data[0]['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_single(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events/1', headers=a_headers) self.assertEqual('empty_ev', data['event_type']) data = self.get_json('/events/2', headers=a_headers) self.assertEqual('admin_ev', data['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_trait_filter_no_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'user_id', 'value': self.user_id, 'type': 'string', 'op': 'eq'}]) self.assertEqual(0, 
len(data)) class EventRestrictionTestBase(v2.FunctionalTest): def setUp(self): super(EventRestrictionTestBase, self).setUp() self.CONF.set_override('default_api_return_limit', 10, group='api') self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for i in range(20): trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_text"), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] event_models.append( models.Event(message_id=str(uuid.uuid4()), event_type='foo.bar', generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) self.trait_time += datetime.timedelta(seconds=1) self.event_conn.record_events(event_models) class TestEventRestriction(EventRestrictionTestBase): def test_get_limit(self): data = self.get_json('/events?limit=1', headers=HEADERS) self.assertEqual(1, len(data)) def test_get_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/events?limit=-2', headers=HEADERS) def test_get_limit_bigger(self): data = self.get_json('/events?limit=100', headers=HEADERS) self.assertEqual(20, len(data)) def test_get_default_limit(self): data = self.get_json('/events', headers=HEADERS) self.assertEqual(10, len(data)) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py0000664000567000056710000001275613072744703031750 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing raw samples. 
""" import datetime import mock from oslo_utils import timeutils import six from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 class TestListSamples(v2.FunctionalTest): def setUp(self): super(TestListSamples, self).setUp() patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42) self.sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', 'dict_properties': {'key': 'value'}, 'not_ignored_list': ['returned'], }, source='test_source', ) msg = utils.meter_message_from_counter( self.sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) self.sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='source2', ) msg2 = utils.meter_message_from_counter( self.sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) def test_all(self): data = self.get_json('/meters/instance') self.assertEqual(2, len(data)) for s in data: self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at']) def test_all_trailing_slash(self): data = self.get_json('/meters/instance/') self.assertEqual(2, len(data)) def test_empty_project(self): data = self.get_json('/meters/instance', q=[{'field': 'project_id', 'value': 'no-such-project', }]) self.assertEqual([], data) def test_by_project(self): data = self.get_json('/meters/instance', q=[{'field': 'project_id', 'value': 'project1', }]) self.assertEqual(1, len(data)) def test_empty_resource(self): data = self.get_json('/meters/instance', q=[{'field': 'resource_id', 'value': 'no-such-resource', }]) self.assertEqual([], data) def test_by_resource(self): data = self.get_json('/meters/instance', q=[{'field': 'resource_id', 'value': 'resource-id', }]) self.assertEqual(1, len(data)) def test_empty_source(self): data = self.get_json('/meters/instance', q=[{'field': 'source', 'value': 'no-such-source', }]) self.assertEqual(0, len(data)) def test_by_source(self): data = self.get_json('/meters/instance', q=[{'field': 'source', 'value': 'test_source', }]) self.assertEqual(1, len(data)) def test_empty_user(self): data = self.get_json('/meters/instance', q=[{'field': 'user_id', 'value': 'no-such-user', }]) self.assertEqual([], data) def test_by_user(self): data = self.get_json('/meters/instance', q=[{'field': 'user_id', 'value': 'user-id', }]) self.assertEqual(1, len(data)) def test_metadata(self): data = self.get_json('/meters/instance', q=[{'field': 'resource_id', 'value': 'resource-id', }]) sample = data[0] self.assertIn('resource_metadata', sample) self.assertEqual( [('dict_properties.key', 'value'), ('display_name', 'test-server'), ('not_ignored_list', "['returned']"), ('tag', 'self.sample'), ], list(sorted(six.iteritems(sample['resource_metadata'])))) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_capabilities.py0000664000567000056710000000225613072744706027631 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2014. 
All rights reserved
#
# Authors: Ildiko Vancsa
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ceilometer.tests.functional.api import v2 as tests_api


class TestCapabilitiesController(tests_api.FunctionalTest):

    def setUp(self):
        super(TestCapabilitiesController, self).setUp()
        self.url = '/capabilities'

    def test_capabilities(self):
        data = self.get_json(self.url)
        # Check that the capabilities data contains both 'api' and
        # 'storage' fields.
        self.assertIsNotNone(data)
        self.assertNotEqual({}, data)
        self.assertIn('api', data)
        self.assertIn('storage', data)
ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py0000664000567000056710000004614513072744706032310 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing resources.
""" import datetime import json import six import webtest.app from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 class TestListResources(v2.FunctionalTest): def test_empty(self): data = self.get_json('/resources') self.assertEqual([], data) def _verify_resource_timestamps(self, res, first, last): # Bounds need not be tight (see ceilometer bug #1288372) self.assertIn('first_sample_timestamp', res) self.assertTrue(first.isoformat() >= res['first_sample_timestamp']) self.assertIn('last_sample_timestamp', res) self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) def test_instance_no_metadata(self): timestamp = datetime.datetime(2012, 7, 2, 10, 40) sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamp, resource_metadata=None, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') self.assertEqual(1, len(data)) self._verify_resource_timestamps(data[0], timestamp, timestamp) def test_instances(self): timestamps = { 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), } sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamps['resource-id'], resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-alternate', timestamp=timestamps['resource-id-alternate'], resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources') self.assertEqual(2, len(data)) for res in data: timestamp = timestamps.get(res['resource_id']) self._verify_resource_timestamps(res, timestamp, timestamp) def test_instance_multiple_samples(self): timestamps = [ datetime.datetime(2012, 7, 2, 10, 41), datetime.datetime(2012, 7, 2, 10, 42), datetime.datetime(2012, 7, 2, 10, 40), ] for timestamp in timestamps: datapoint = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamp, resource_metadata={'display_name': 'test-server', 'tag': 'self.sample-%s' % timestamp, }, source='test', ) msg = utils.meter_message_from_counter( datapoint, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') self.assertEqual(1, len(data)) self._verify_resource_timestamps(data[0], timestamps[-1], timestamps[1]) def test_instances_one(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, 
source='test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources/resource-id') self.assertEqual('resource-id', data['resource_id']) def test_with_source(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'source', 'value': 'test_list_resources', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) sources = [r['source'] for r in data] self.assertEqual(['test_list_resources'], sources) def test_with_invalid_resource_id(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-1', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='test_list_resources', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) resp1 = self.get_json('/resources/resource-id-1') self.assertEqual("resource-id-1", resp1["resource_id"]) resp2 = self.get_json('/resources/resource-id-2') self.assertEqual("resource-id-2", resp2["resource_id"]) resp3 = self.get_json('/resources/resource-id-3', expect_errors=True) self.assertEqual(404, resp3.status_code) json_data = resp3.body if six.PY3: json_data = json_data.decode('utf-8') self.assertEqual("Resource resource-id-3 Not Found", json.loads(json_data)['error_message'] ['faultstring']) def test_with_user(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'user_id', 'value': 'user-id', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) 
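
    # Note on the helper API: the q=[...] dicts used throughout this class
    # are serialized by get_json() into the v2 simple-filter query string,
    # e.g. GET /v2/resources?q.field=user_id&q.op=eq&q.value=user-id.
    # A hedged, hypothetical extra check (not in the original suite): with
    # an explicit 'op' key and no samples recorded, the filter must match
    # nothing on this test's empty database.
    def test_with_unknown_user_explicit_op(self):
        data = self.get_json('/resources',
                             q=[{'field': 'user_id',
                                 'op': 'eq',
                                 'value': 'no-such-user'}])
        self.assertEqual([], data)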
def test_with_project(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'project_id', 'value': 'project-id', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) def test_with_user_non_admin(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', headers={"X-Roles": "Member", "X-Project-Id": "project-id2"}) ids = set(r['resource_id'] for r in data) self.assertEqual(set(['resource-id-alternate']), ids) def test_with_user_wrong_tenant(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', headers={"X-Roles": "Member", "X-Project-Id": "project-wrong"}) ids = set(r['resource_id'] for r in data) self.assertEqual(set(), ids) def test_metadata(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', 'dict_properties': {'key.$1': {'$key': 'val'}}, 'not_ignored_list': ['returned'], }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') metadata = data[0]['metadata'] self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), (u'display_name', u'test-server'), (u'not_ignored_list', u"['returned']"), (u'tag', u'self.sample')], list(sorted(six.iteritems(metadata)))) def test_resource_meter_links(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') links = data[0]['links'] self.assertEqual(2, len(links)) self.assertEqual('self', links[0]['rel']) self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') in links[0]['href']) 
self.assertEqual('instance', links[1]['rel']) self.assertTrue((self.PATH_PREFIX + '/meters/instance?' 'q.field=resource_id&q.value=resource-id') in links[1]['href']) def test_resource_skip_meter_links(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources?meter_links=0') links = data[0]['links'] self.assertEqual(len(links), 1) self.assertEqual(links[0]['rel'], 'self') self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') in links[0]['href']) class TestListResourcesRestriction(v2.FunctionalTest): def setUp(self): super(TestListResourcesRestriction, self).setUp() self.CONF.set_override('default_api_return_limit', 10, group='api') for i in range(20): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id%s' % i, timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + datetime.timedelta(seconds=i)), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_resource_limit(self): data = self.get_json('/resources?limit=1') self.assertEqual(1, len(data)) def test_resource_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/resources?limit=-2') def test_resource_limit_bigger(self): data = self.get_json('/resources?limit=42') self.assertEqual(20, len(data)) def test_resource_default_limit(self): data = self.get_json('/resources') self.assertEqual(10, len(data)) ceilometer-6.1.5/ceilometer/tests/functional/api/v2/__init__.py0000664000567000056710000000131213072744703025665 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.tests.functional import api class FunctionalTest(api.FunctionalTest): PATH_PREFIX = '/v2' ceilometer-6.1.5/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py0000664000567000056710000010305613072744703031575 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing meters. 
""" import base64 import datetime from oslo_serialization import jsonutils import six import webtest.app from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 class TestListEmptyMeters(v2.FunctionalTest): def test_empty(self): data = self.get_json('/meters') self.assertEqual([], data) class TestValidateUserInput(v2.FunctionalTest): def test_list_meters_query_float_metadata(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': '0.7.5', 'type': 'float'}]) self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': 'abacaba', 'type': 'boolean'}]) self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': '45.765', 'type': 'integer'}]) class TestListMetersRestriction(v2.FunctionalTest): def setUp(self): super(TestListMetersRestriction, self).setUp() self.CONF.set_override('default_api_return_limit', 3, group='api') for x in range(5): for i in range(5): s = sample.Sample( 'volume.size%s' % x, 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + datetime.timedelta(seconds=i)), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_meter_limit(self): data = self.get_json('/meters?limit=1') self.assertEqual(1, len(data)) def test_meter_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters?limit=-2') def test_meter_limit_bigger(self): data = self.get_json('/meters?limit=42') self.assertEqual(5, len(data)) def test_meter_default_limit(self): data = self.get_json('/meters') self.assertEqual(3, len(data)) def test_old_sample_limit(self): data = self.get_json('/meters/volume.size0?limit=1') self.assertEqual(1, len(data)) def test_old_sample_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters/volume.size0?limit=-2') def test_old_sample_limit_bigger(self): data = self.get_json('/meters/volume.size0?limit=42') self.assertEqual(5, len(data)) def test_old_sample_default_limit(self): data = self.get_json('/meters/volume.size0') self.assertEqual(3, len(data)) def test_sample_limit(self): data = self.get_json('/samples?limit=1') self.assertEqual(1, len(data)) def test_sample_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/samples?limit=-2') def test_sample_limit_bigger(self): data = self.get_json('/samples?limit=42') self.assertEqual(25, len(data)) def test_sample_default_limit(self): data = self.get_json('/samples') self.assertEqual(3, len(data)) class TestListMeters(v2.FunctionalTest): def setUp(self): super(TestListMeters, self).setUp() self.messages = [] for cnt in [ sample.Sample( 'meter.test', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', 'size': 123, 'util': 0.75, 'is_public': True}, source='test_source'), sample.Sample( 'meter.test', 'cumulative', '', 3, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 11, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', 'size': 0, 'util': 0.47, 'is_public': False}, 
source='test_source'), sample.Sample( 'meter.mine', 'gauge', '', 1, 'user-id', 'project-id', 'resource-id2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', 'size': 456, 'util': 0.64, 'is_public': False}, source='test_source'), sample.Sample( 'meter.test', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id3', timestamp=datetime.datetime(2012, 7, 2, 10, 42), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample3', 'size': 0, 'util': 0.75, 'is_public': False}, source='test_source'), sample.Sample( 'meter.test.new', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample3', 'size': 0, 'util': 0.75, 'is_public': False}, source='test_source'), sample.Sample( 'meter.mine', 'gauge', '', 1, 'user-id4', 'project-id2', 'resource-id4', timestamp=datetime.datetime(2012, 7, 2, 10, 43), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample4', 'properties': { 'prop_1': 'prop_value', 'prop_2': {'sub_prop_1': 'sub_prop_value'}, 'prop.3': {'$sub_prop.2': 'sub_prop_value2'} }, 'size': 0, 'util': 0.58, 'is_public': True}, source='test_source1'), sample.Sample( u'meter.accent\xe9\u0437', 'gauge', '', 1, 'user-id4', 'project-id2', 'resource-id4', timestamp=datetime.datetime(2014, 7, 2, 10, 43), resource_metadata={}, source='test_source1')]: msg = utils.meter_message_from_counter( cnt, self.CONF.publisher.telemetry_secret) self.messages.append(msg) self.conn.record_metering_data(msg) def test_list_meters(self): data = self.get_json('/meters') self.assertEqual(6, len(data)) self.assertEqual(set(['resource-id', 'resource-id2', 'resource-id3', 'resource-id4']), set(r['resource_id'] for r in data)) self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', u'meter.accent\xe9\u0437']), set(r['name'] for r in data)) self.assertEqual(set(['test_source', 'test_source1']), set(r['source'] for r in data)) def test_list_unique_meters(self): data = self.get_json('/meters?unique=True') self.assertEqual(4, len(data)) self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', u'meter.accent\xe9\u0437']), set(r['name'] for r in data)) def test_meters_query_with_timestamp(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() resp = self.get_json('/meters', q=[{'field': 'timestamp', 'op': 'gt', 'value': isotime}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual('Unknown argument: "timestamp": ' 'not valid for this resource', jsonutils.loads(resp.body)['error_message'] ['faultstring']) def test_list_samples(self): data = self.get_json('/samples') self.assertEqual(7, len(data)) def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): resp = self.get_json('/samples', q=[{'field': 'non_valid_field_name', 'op': 'gt', 'value': 3}], expect_errors=True) resp_string = jsonutils.loads(resp.body) fault_string = resp_string['error_message']['faultstring'] msg = ('Unknown argument: "non_valid_field_name"' ': unrecognized field in query: ' '[ # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess import time from oslo_utils import fileutils import six from ceilometer.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super(BinTestCase, self).setUp() content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "connection=log://localhost\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super(BinTestCase, self).tearDown() os.remove(self.tempfile) def test_dbsync_run(self): subp = subprocess.Popen(['ceilometer-dbsync', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) def test_run_expirer_ttl_disabled(self): subp = subprocess.Popen(['ceilometer-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE) stdout, __ = subp.communicate() self.assertEqual(0, subp.poll()) self.assertIn(b"Nothing to clean, database metering " b"time to live is disabled", stdout) self.assertIn(b"Nothing to clean, database event " b"time to live is disabled", stdout) def _test_run_expirer_ttl_enabled(self, ttl_name, data_name): content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "%s=1\n" "connection=log://localhost\n" % ttl_name) if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') subp = subprocess.Popen(['ceilometer-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE) stdout, __ = subp.communicate() self.assertEqual(0, subp.poll()) msg = "Dropping %s data with TTL 1" % data_name if six.PY3: msg = msg.encode('utf-8') self.assertIn(msg, stdout) def test_run_expirer_ttl_enabled(self): self._test_run_expirer_ttl_enabled('metering_time_to_live', 'metering') self._test_run_expirer_ttl_enabled('time_to_live', 'metering') self._test_run_expirer_ttl_enabled('event_time_to_live', 'event') class BinSendSampleTestCase(base.BaseTestCase): def setUp(self): super(BinSendSampleTestCase, self).setUp() pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml') content = ("[DEFAULT]\n" "rpc_backend=fake\n" "pipeline_cfg_file={0}\n".format(pipeline_cfg_file)) if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super(BinSendSampleTestCase, self).tearDown() os.remove(self.tempfile) def test_send_counter_run(self): subp = subprocess.Popen(['ceilometer-send-sample', "--config-file=%s" % self.tempfile, "--sample-resource=someuuid", "--sample-name=mycounter"]) self.assertEqual(0, subp.wait()) class BinCeilometerPollingServiceTestCase(base.BaseTestCase): def setUp(self): super(BinCeilometerPollingServiceTestCase, self).setUp() self.tempfile = None self.subp = None def tearDown(self): if self.subp: try: self.subp.kill() except OSError: pass os.remove(self.tempfile) super(BinCeilometerPollingServiceTestCase, self).tearDown() def test_starting_with_duplication_namespaces(self): content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "connection=log://localhost\n") if six.PY3: content = 
content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') self.subp = subprocess.Popen(['ceilometer-polling', "--config-file=%s" % self.tempfile, "--polling-namespaces", "compute", "compute"], stderr=subprocess.PIPE) expected = (b'Duplicated values: [\'compute\', \'compute\'] ' b'found in CLI options, auto de-duplicated') # NOTE(gordc): polling process won't quit so wait for a bit and check start = time.time() while time.time() - start < 5: output = self.subp.stderr.readline() if expected in output: break else: self.fail('Did not detect expected warning: %s' % expected) def test_polling_namespaces_invalid_value_in_config(self): content = ("[DEFAULT]\n" "rpc_backend=fake\n" "polling_namespaces = ['central']\n" "[database]\n" "connection=log://localhost\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') self.subp = subprocess.Popen( ["ceilometer-polling", "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) __, err = self.subp.communicate() expected = ("Exception: Valid values are ['compute', 'central', " "'ipmi'], but found [\"['central']\"]") self.assertIn(expected, err) ceilometer-6.1.5/ceilometer/tests/functional/gabbi/0000775000567000056710000000000013072745164023525 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml0000664000567000056710000000067413072744706027352 0ustar jenkinsjenkins00000000000000# A limited pipeline for use with the Gabbi spike. # direct writes to the the metering database without using an # intermediary dispatcher. # # This is one of several things that will need some extensive # tidying to be more right. --- sources: - name: meter_source interval: 1 meters: - "*" sinks: - meter_sink sinks: - name: meter_sink transformers: publishers: - direct:// ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/0000775000567000056710000000000013072745164025140 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/samples.yaml0000664000567000056710000001053613072744706027476 0ustar jenkinsjenkins00000000000000# # Explore and test the samples controller, using samples supplied by # the SampleDataFixture. # fixtures: - ConfigFixture - SampleDataFixture tests: # Confirm all the samples are there and expected requests behave. # TODO(chdent): There's a danger here that the ordering of multiple # samples will not be consistent. 
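# Gabbi runs each entry under 'tests:' in file order: it requests 'url' and
# then checks 'status', 'response_headers', 'response_strings' and
# 'response_json_paths' (JSONPath assertions against the JSON body). A
# commented-out sketch of one more such check, reusing the livestock
# samples the fixture provides (hypothetical, not part of the original
# file):
#
# - name: first sample is livestock
#   url: /v2/samples?limit=1
#   response_json_paths:
#     $[0].meter: livestock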
- name: lists samples url: /v2/samples response_headers: content-type: /application/json/ response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[1].metadata.breed: pig $[2].metadata.breed: sheep - name: get just one url: /v2/samples/$RESPONSE['$[0].id'] response_json_paths: $.meter: livestock $.metadata.breed: cow - name: list samples with limit url: /v2/samples?limit=1 response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[-1].metadata.breed: cow - name: list zero samples with zero limit url: /v2/samples?limit=0 status: 400 - name: list samples with query url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[-1].metadata.breed: cow - name: query by user url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq response_json_paths: $[0].user_id: $RESPONSE['$[0].user_id'] - name: query by user_id url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq response_json_paths: $[0].user_id: $RESPONSE['$[0].user_id'] - name: query by project url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq response_json_paths: $[0].project_id: $RESPONSE['$[0].project_id'] - name: query by project_id url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq response_json_paths: $[0].project_id: $RESPONSE['$[0].project_id'] # Explore failure modes for listing samples - name: list samples with bad field url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq status: 400 response_strings: - timestamp - project - unrecognized field in query - name: list samples with bad metaquery field url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq status: 400 response_strings: - unrecognized field in query - name: bad limit value url: /v2/samples?limit=happiness status: 400 response_strings: - Invalid input for field/attribute limit - name: negative limit value 400 url: /v2/samples?limit=-99 status: 400 - name: negative limit value error message url: /v2/samples?limit=-99 status: 400 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: Limit must be positive - name: bad accept desc: try an unexpected content type url: /v2/samples request_headers: accept: text/plain status: 406 - name: complex good accept desc: client sends complex accept do we adapt url: /v2/samples request_headers: accept: text/plain, application/json; q=0.8 - name: complex bad accept desc: client sends complex accept do we adapt url: /v2/samples request_headers: accept: text/plain, application/binary; q=0.8 status: 406 - name: bad method url: /v2/samples method: POST status: 405 response_headers: allow: GET # Work with just one sample. - name: list one of the samples url: /v2/samples?limit=1 - name: retrieve one sample url: /v2/samples/$RESPONSE['$[0].id'] response_headers: content-type: /application/json/ response_json_paths: $.meter: livestock - name: retrieve sample with useless query url: /v2/samples/$RESPONSE['$.id']?limit=5 status: 400 response_strings: - "Unknown argument:" - name: attempt missing sample url: /v2/samples/davesnothere status: 404 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: Sample davesnothere Not Found ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/basic.yaml0000664000567000056710000000127713072744706027115 0ustar jenkinsjenkins00000000000000# # Some simple tests just to confirm that the system works. 
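#
# The 'fixtures' entry names Python fixture classes that gabbi resolves
# from the test loader's fixture module and starts once for the whole
# file. The second test below also uses gabbi's $RESPONSE substitution,
# which re-reads a JSONPath from the previous response instead of
# hard-coding a URL.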
# fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. - name: quick root check url: / response_headers: content-type: application/json; charset=UTF-8 response_strings: - '"base": "application/json"' response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json # NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! - name: v2 visit desc: this demonstrates a bug in the info in / url: $RESPONSE['versions.values.[0].links.[0].href'] status: 404 ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml0000664000567000056710000000620013072744706030547 0ustar jenkinsjenkins00000000000000# Post a simple sample, sir, and then retrieve it in various ways. fixtures: - ConfigFixture tests: # POST one sample and verify its existence. - name: post sample for meter desc: post a single sample url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: | [ { "counter_name": "apples", "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", "counter_unit": "instance", "counter_volume": 1, "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", "resource_metadata": { "name2": "value2", "name1": "value1" }, "counter_type": "gauge" } ] response_json_paths: $.[0].counter_name: apples status: 201 response_headers: content-type: application/json; charset=UTF-8 # When POSTing a sample perhaps we should get back a location header # with the URI of the posted sample - name: post a sample expect location desc: https://bugs.launchpad.net/ceilometer/+bug/1426426 xfail: true url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples project_id: 35b17138-b364-4e6a-a131-8f3099c5be68 user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 resource_metadata: name2: value2 name1: value1 counter_type: gauge response_headers: location: /$SCHEME://$NETLOC/ # GET all the samples created for the apples meter - name: get samples for meter desc: get all the samples at that meter url: /v2/meters/apples response_json_paths: $.[0].counter_name: apples $.[0].counter_volume: 1 $.[0].resource_metadata.name2: value2 # POSTing a sample to a meter will implicitly create a resource - name: get resources desc: get the resources that exist because of the sample url: /v2/resources response_json_paths: $.[0].metadata.name2: value2 # NOTE(chdent): We assume that the first item in links is self. # Need to determine how to express the more correct JSONPath here # (if possible). - name: get resource desc: get just one of those resources via self url: $RESPONSE['$[0].links[0].href'] response_json_paths: $.metadata.name2: value2 # GET the created samples - name: get samples desc: get all the created samples url: /v2/samples response_json_paths: $.[0].metadata.name2: value2 $.[0].meter: apples - name: get one sample desc: get the one sample that exists url: /v2/samples/$RESPONSE['$[0].id'] response_json_paths: $.metadata.name2: value2 $.meter: apples ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/meters.yaml0000664000567000056710000002560713072744706027326 0ustar jenkinsjenkins00000000000000# # Tests to explore and cover the /v2/meters section of the # Ceilometer API. # fixtures: - ConfigFixture tests: # Generic HTTP health explorations of all meters.
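# A note on the conventions used throughout this file: samples are POSTed
# to /v2/meters/<name>?direct=True so they are written straight to storage
# rather than through the normal publishing chain, and filters are spelled
# as q.field/q.op/q.value triplets on the query string. A hypothetical
# filtered request (illustrative only) would look like:
#
#   /v2/meters/apples?q.field=resource_id&q.op=eq&q.value=<some-resource-id>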
- name: empty meters list url: /v2/meters response_headers: content-type: /application/json/ response_strings: - "[]" - name: meters list bad accept url: /v2/meters request_headers: accept: text/plain status: 406 - name: meters list bad method url: /v2/meters method: POST status: 405 response_headers: allow: GET - name: try to delete meters url: /v2/meters method: DELETE status: 405 response_headers: allow: GET # Generic HTTP health explorations of single meter. - name: get non exist meter url: /v2/meters/noexist response_strings: - "[]" - name: meter bad accept url: /v2/meters/noexist?direct=True request_headers: accept: text/plain status: 406 - name: meter delete noexist url: /v2/meters/noexist method: DELETE status: "404 || 405" - name: post meter no data url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: "" status: 400 - name: post meter error is JSON url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: "" status: 400 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: "Samples should be included in request body" - name: post meter bad content-type url: /v2/meters/apples?direct=True method: POST request_headers: content-type: text/plain data: hello status: 415 - name: post bad samples to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: samples: - red - blue - yellow status: 400 # POST variations on a malformed sample - name: post limited counter to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_name" - name: post mismatched counter name to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: cars counter_type: gauge counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_name" - "should be apples" - name: post counter no resource to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 1 status: 400 response_strings: - "Invalid input for field/attribute resource_id" - "Mandatory field missing." - name: post counter bad type to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: elevation counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_type." 
- "The counter type must be: gauge, delta, cumulative" # Manipulate samples - name: post counter to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 201 - name: list apple samples url: /v2/meters/apples response_json_paths: $[0].counter_volume: 1.0 $[0].counter_name: apples $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - name: list meters url: /v2/meters response_json_paths: $[0].name: apples $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 $[0].type: gauge $[-1].name: apples - name: negative limit on meter list url: /v2/meters/apples?limit=-5 status: 400 response_strings: - Limit must be positive - name: nan limit on meter list url: /v2/meters/apples?limit=NaN status: 400 response_strings: - unable to convert to int - name: post counter to meter different resource url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 2 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - name: query for resource url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq response_json_paths: $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa # Explore posting samples with less than perfect data. - name: post counter with bad timestamp url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-bad 23:23:20" status: 400 response_strings: - 'Invalid input for field/attribute samples' - name: post counter with good timestamp url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" - name: post counter with wrong metadata url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" resource_metadata: "a string" status: 400 response_strings: - "Invalid input for field/attribute samples" - name: post counter with empty metadata url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" resource_metadata: {} # Statistics - name: get sample statistics url: /v2/meters/apples/statistics response_json_paths: $[0].groupby: null $[0].unit: instance $[0].sum: 9.0 $[0].min: 1.0 $[0].max: 3.0 $[0].count: 4 - name: get incorrectly grouped sample statistics url: /v2/meters/apples/statistics?groupby=house_id status: 400 response_strings: - Invalid groupby fields - name: get grouped sample statistics url: /v2/meters/apples/statistics?groupby=resource_id response_json_paths: $[1].max: 3.0 $[0].max: 1.0 - name: get sample statistics bad period url: 
/v2/meters/apples/statistics?period=seven status: 400 response_strings: - unable to convert to int - name: get sample statistics negative period url: /v2/meters/apples/statistics?period=-7 status: 400 response_strings: - Period must be positive. - name: get sample statistics 600 period url: /v2/meters/apples/statistics?period=600 response_json_paths: $[0].period: 600 - name: get sample statistics time limit not time url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember status: 400 response_strings: - invalid timestamp format - name: get sample statistics time limit gt url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 response_json_paths: $[0].count: 2 - name: get sample statistics time limit lt url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 response_json_paths: $[0].count: 2 - name: get sample statistics time limit bounded url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 response_strings: - "[]" - name: get sample statistics select aggregate bad format url: /v2/meters/apples/statistics?aggregate=max status: 400 - name: get sample statistics select aggregate url: /v2/meters/apples/statistics?aggregate.func=max response_json_paths: $[0].aggregate.max: 3.0 # limit meters results - name: get meters unlimited url: /v2/meters response_json_paths: $.`len`: 2 - name: get meters limited url: /v2/meters?limit=1 response_json_paths: $.`len`: 1 - name: get meters double limit url: /v2/meters?limit=1&limit=1 status: 400 - name: get meters filter limit desc: expressing limit this way is now disallowed url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' - name: get meters filter limit and limit url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml0000664000567000056710000000262213072744706031175 0ustar jenkinsjenkins00000000000000# # Explore and cover resources API with gabbi tests when there are no # resources. # fixtures: - ConfigFixture tests: # Check for a list of resources, modifying the request in various # ways.
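#
# With no sample fixture loaded the storage is empty, so every well-formed
# listing request in this file should come back as the JSON empty list.
# The recurring pattern, sketched here for illustration only:
#
# - name: example empty listing (illustrative only)
#   url: /v2/resources
#   response_strings:
#       - "[]"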
- name: list resources no extra desc: Provide no additional header guidelines url: /v2/resources response_headers: content-type: /application/json/ response_strings: - "[]" - name: list resources but get url wrong url: /v2/resrces status: 404 - name: list resources explicit accept url: /v2/resources request_headers: accept: application/json response_strings: - "[]" - name: list resources bad accept url: /v2/resources request_headers: accept: text/plain status: 406 - name: list resources with bad query field url: /v2/resources?q.field=id&q.value=cars status: 400 response_strings: - unrecognized field in query - name: list resources with query url: /v2/resources?q.field=resource&q.value=cars response_strings: - "[]" - name: list resource bad type meter links url: /v2/resources?meter_links=yes%20please status: 400 response_strings: - unable to convert to int - name: list resource meter links int url: /v2/resources?meter_links=0 response_strings: - "[]" ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml0000664000567000056710000001647513072744706032213 0ustar jenkinsjenkins00000000000000# These tests run against the Events API with data preloaded into the datastore. fixtures: - ConfigFixture - EventDataFixture tests: # this attempts to get all the events and checks to make sure they are valid - name: get all events url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip $.[0].traits.[1].value: '0' $.[0].raw.nested.inside: value $.[1].event_type: cookies_peanut.butter $.[1].traits.[0].name: type $.[1].traits.[1].name: ate $.[1].raw.nested.inside: value $.[2].event_type: cookies_sugar $.[2].traits.[0].type: string $.[2].traits.[1].type: integer $.[2].raw.nested.inside: value # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params url: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects a matching event - name: get events that match query url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip # this attempts to query the events with the correct data query syntax and # expects a matching event - name: get events that match query via data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8
response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # a bad field name and expects an empty list - name: get events that match bad query via data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via data list url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # Get a single event by message_id should return an event - name: get a single event url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.event_type: cookies_chocolate.chip $.traits.[0].value: chocolate.chip $.traits.[1].value: '0' # Get a single event by message_id no data is present so should return a 404 - name: get a single event that does not exist url: /v2/events/bad-id request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return a list of event types - name: get all event types url: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - cookies_chocolate.chip - cookies_peanut.butter - cookies_sugar # Get a single event type by valid name, this API is unused and should return a 404 - name: get event types for good event_type unused api url: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get a single event type by invalid name, this API is unused and should return a 404 - name: get event types for bad event_type unused api url: /v2/event_types/bad_event_type request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for a valid event type should return a list of traits - name: get all traits for event type url: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].type: string $.[1].name: ate # Get all traits for an invalid event type should return an empty list - name: get all traits names for event type bad event type url: /v2/event_types/bad_event_type/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get all traits of type ate for a valid event type should return a list of # traits - name: get all traits of type ate for event type url: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].name: ate $.[0].value: '0' # Get all traits of type ate for an invalid event type should return an empty
# list - name: get all traits of type for event type bad event type url: /v2/event_types/bad_event_type/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get all traits of type bad_trait_name for a valid event type should return an # empty list - name: get all traits of type instances for event type bad trait name url: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml0000664000567000056710000000046013072744706030456 0ustar jenkinsjenkins00000000000000# # Explore the capabilities API # fixtures: - ConfigFixture tests: - name: get capabilities desc: retrieve capabilities for the mongo store url: /v2/capabilities response_json_paths: $.event_storage.['storage:production_ready']: true $.storage.['storage:production_ready']: true ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/middleware.yaml0000664000567000056710000000210513072744703030135 0ustar jenkinsjenkins00000000000000# # Test the middlewares. Just CORS for now. # fixtures: - ConfigFixture - CORSConfigFixture tests: - name: valid cors options OPTIONS: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors options OPTIONS: / status: 200 request_headers: origin: http://invalid.example.com access-control-request-method: GET response_forbidden_headers: - access-control-allow-origin - name: valid cors get GET: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors get GET: / status: 200 request_headers: origin: http://invalid.example.com response_forbidden_headers: - access-control-allow-origin ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml0000664000567000056710000000477413072744706031703 0ustar jenkinsjenkins00000000000000# # Explore and cover resources API with gabbi tests when there are a # small number of pre-existing resources # fixtures: - ConfigFixture - SampleDataFixture tests: - name: list all resources url: /v2/resources response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: get one resource desc: get a resource via the links in the first resource listed above url: $RESPONSE['$[0].links[0].href'] response_json_paths: $.resource_id: $RESPONSE['$[0].resource_id'] - name: list resources limit user_id url: /v2/resources?q.field=user_id&q.value=farmerjon response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: list resources limit metadata url: /v2/resources?q.field=metadata.breed&q.value=sheep response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: list resources limit metadata no match url: /v2/resources?q.field=metadata.breed&q.value=llamma response_strings: - "[]" - name: fail to get one resource url: /v2/resources/nosirnothere status: 404 - name: list resource meter links present url: /v2/resources?meter_links=1 response_json_paths: $[0].links[0].rel: self $[0].links[1].rel: livestock $[0].links[-1].rel: livestock - name: list resource meter links not present url: 
/v2/resources?meter_links=0 desc: there is only one links entry when meter_links is 0 response_json_paths: $[0].links[0].rel: self $[0].links[-1].rel: self # limit resource results - name: get resources unlimited url: /v2/resources response_json_paths: $.`len`: 1 - name: get resources limited url: /v2/resources?limit=1 response_json_paths: $.`len`: 1 - name: get resources double limit url: /v2/resources?limit=1&limit=1 status: 400 - name: get resources filter limit desc: expressing limit this way is now disallowed url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' - name: get resources filter limit and limit url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml0000664000567000056710000000070413072744706031156 0ustar jenkinsjenkins00000000000000# # Demonstrate a simple sample fixture. # fixtures: - ConfigFixture - SampleDataFixture tests: - name: get fixture samples desc: get all the samples at livestock url: /v2/meters/livestock response_json_paths: $.[0].counter_name: livestock $.[1].counter_name: livestock $.[2].counter_name: livestock $.[2].user_id: farmerjon $.[0].resource_metadata.breed: cow $.[1].resource_metadata.farmed_by: nancy ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml0000664000567000056710000001411513072744706031651 0ustar jenkinsjenkins00000000000000# These tests run against the Events API with no data preloaded into the # datastore. This allows us to verify that requests are still processed # normally even if data is missing for that endpoint.
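#
# Every request in this file carries the X-Roles, X-User-Id and
# X-Project-Id headers that keystone middleware would normally supply;
# the first few tests below confirm that omitting any one of them
# yields a 403.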
fixtures: - ConfigFixture tests: # this attempts to get all the events and expects an empty list back - name: get all events url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to get all the events with no role/user/project # info in header and expects a 403 - name: get events with bad headers url: /v2/events status: 403 # this attempts to get all the events with no user/project # info in header and expects a 403 - name: get events with admin only header url: /v2/events request_headers: X-Roles: admin status: 403 # this attempts to get all the events with no project # info in header and expects a 403 - name: get events with no project header url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 status: 403 # this attempts to get all the events with no user # info in header and expects a 403 - name: get events with no user header url: /v2/events request_headers: X-Roles: admin X-Project-Id: project1 status: 403 # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params url: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects an empty list - name: get events that match query url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # expects an empty list - name: get events that match query via request data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # a bad field name and expects an empty list - name: get events that match bad query via request data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via request data malformed list url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # this attempts to query the events with the wrong 
data query syntax missing the # q object but supplying the field list along with a bad content-type. Should # return a 415 - name: get events that match bad query via request data wrong type url: /v2/events request_headers: content-type: text/plain X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: "field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True" status: 415 # Get a single event by message_id no data is present so should return a 404 - name: get a single event url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return an empty list - name: get all event types url: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get a single event type by name, this API is unused and should return a 404 - name: get event types for good event_type unused api url: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for an event type should return an empty list - name: get all traits for event type url: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get all traits named ate for an event type should return an empty list - name: get all traits named ate for event type url: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" ceilometer-6.1.5/ceilometer/tests/functional/gabbi/test_gabbi.py0000664000567000056710000000226513072744706026210 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A test module to exercise the Ceilometer API with gabbi For the sake of exploratory development. """ import os from gabbi import driver from ceilometer.api import app from ceilometer.tests.functional.gabbi import fixtures as fixture_module TESTS_DIR = 'gabbits' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, intercept=app.load_app, fixture_module=fixture_module) ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbi_paste.ini0000664000567000056710000000143013072744706026465 0ustar jenkinsjenkins00000000000000# Ceilometer API WSGI Pipeline # Define the filters that make up the pipeline for processing WSGI requests # Note: 'pipeline' here is PasteDeploy's term rather than Ceilometer's pipeline # used for processing samples # # This version is specific for gabbi.
It removes support for keystone while # keeping support for CORS. # Remove authtoken from the pipeline if you don't want to use keystone authentication [pipeline:main] pipeline = cors api-server [app:api-server] paste.app_factory = ceilometer.api.app:app_factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = ceilometer ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits_prefix/0000775000567000056710000000000013072745164026515 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml0000664000567000056710000000102213072744706030456 0ustar jenkinsjenkins00000000000000# # Confirm root reports the right data including a prefixed URL # fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. - name: quick root check url: / response_headers: content-type: application/json; charset=UTF-8 response_strings: - '"base": "application/json"' - /telemetry/ response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml0000664000567000056710000000273113072744706032131 0ustar jenkinsjenkins00000000000000# Post a simple sample and confirm the created resource has # reasonable URLs fixtures: - ConfigFixture tests: # POST one sample and verify its existence. - name: post sample for meter desc: post a single sample url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: | [ { "counter_name": "apples", "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", "counter_unit": "instance", "counter_volume": 1, "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", "resource_metadata": { "name2": "value2", "name1": "value1" }, "counter_type": "gauge" } ] response_json_paths: $.[0].counter_name: apples status: 201 response_headers: content-type: application/json; charset=UTF-8 - name: get resources desc: get the resources that exist because of the sample url: /v2/resources response_json_paths: $.[0].metadata.name2: value2 - name: get resource desc: get just one of those resources via self url: $RESPONSE['$[0].links[0].href'] response_json_paths: $.metadata.name2: value2 response_strings: - /telemetry/ ceilometer-6.1.5/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml0000664000567000056710000000114413072744706033244 0ustar jenkinsjenkins00000000000000# # Explore and cover resources API with gabbi tests when there are a # small number of pre-existing resources # fixtures: - ConfigFixture - SampleDataFixture tests: - name: list all resources url: /v2/resources response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock response_strings: - /telemetry/ - name: get one resource desc: get a resource via the links in the first resource listed above url: $RESPONSE['$[0].links[0].href'] response_json_paths: $.resource_id: $RESPONSE['$[0].resource_id'] ceilometer-6.1.5/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py0000664000567000056710000000232213072744706027557 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A module to exercise the Ceilometer API with gabbi with a URL prefix""" import os from gabbi import driver from ceilometer.api import app from ceilometer.tests.functional.gabbi import fixtures as fixture_module TESTS_DIR = 'gabbits_prefix' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, prefix='/telemetry', intercept=app.setup_app, fixture_module=fixture_module) ceilometer-6.1.5/ceilometer/tests/functional/gabbi/fixtures.py0000664000567000056710000001606613072744706025762 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures used during Gabbi-based test runs.""" import datetime import os import random from unittest import case import uuid from gabbi import fixture from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_policy import opts from oslo_utils import fileutils import six from six.moves.urllib import parse as urlparse from ceilometer.event.storage import models from ceilometer.publisher import utils from ceilometer import sample from ceilometer import storage # TODO(chdent): For now only MongoDB is supported, because of easy # database name handling and intentional focus on the API, not the # data store. ENGINES = ['mongodb'] class ConfigFixture(fixture.GabbiFixture): """Establish the relevant configuration for a test run.""" def start_fixture(self): """Set up config.""" self.conf = None # Determine the database connection. 
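        # OVERTEST_URL is the connection string exported by the test
        # environment's runner; when it is unset the fallback is an
        # in-memory sqlite URL, and any mysql:// scheme is rewritten to
        # mysql+pymysql:// so the pure-Python driver is used.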
db_url = os.environ.get('OVERTEST_URL', "sqlite://").replace( "mysql://", "mysql+pymysql://") if not db_url: raise case.SkipTest('No database connection configured') engine = urlparse.urlparse(db_url).scheme if engine not in ENGINES: raise case.SkipTest('Database engine not supported') conf = fixture_config.Config().conf self.conf = conf self.conf([], project='ceilometer', validate_default_values=True) opts.set_defaults(self.conf) conf.import_group('api', 'ceilometer.api.controllers.v2.root') conf.import_opt('store_events', 'ceilometer.notification', group='notification') content = ('{"default": ""}') if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') conf.set_override("policy_file", self.tempfile, group='oslo_policy') conf.set_override( 'api_paste_config', os.path.abspath( 'ceilometer/tests/functional/gabbi/gabbi_paste.ini') ) # A special pipeline is required to use the direct publisher. conf.set_override('pipeline_cfg_file', 'ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml') database_name = '%s-%s' % (db_url, str(uuid.uuid4())) conf.set_override('connection', database_name, group='database') conf.set_override('metering_connection', '', group='database') conf.set_override('event_connection', '', group='database') conf.set_override('pecan_debug', True, group='api') conf.set_override('gnocchi_is_enabled', False, group='api') conf.set_override('aodh_is_enabled', False, group='api') conf.set_override('store_events', True, group='notification') def stop_fixture(self): """Reset the config and remove data.""" if self.conf: storage.get_connection_from_config(self.conf).clear() self.conf.reset() class SampleDataFixture(fixture.GabbiFixture): """Instantiate some sample data for use in testing.""" def start_fixture(self): """Create some samples.""" conf = fixture_config.Config().conf self.conn = storage.get_connection_from_config(conf) timestamp = datetime.datetime.utcnow() project_id = str(uuid.uuid4()) self.source = str(uuid.uuid4()) resource_metadata = {'farmed_by': 'nancy'} for name in ['cow', 'pig', 'sheep']: resource_metadata.update({'breed': name}) c = sample.Sample(name='livestock', type='gauge', unit='head', volume=int(10 * random.random()), user_id='farmerjon', project_id=project_id, resource_id=project_id, timestamp=timestamp, resource_metadata=resource_metadata, source=self.source) data = utils.meter_message_from_counter( c, conf.publisher.telemetry_secret) self.conn.record_metering_data(data) def stop_fixture(self): """Destroy the samples.""" # NOTE(chdent): print here for sake of info during testing. # This will go away eventually.
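        # The remove() calls below return pymongo result documents, so
        # printing them gives a quick count of what the fixture cleaned up.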
print('resource', self.conn.db.resource.remove({'source': self.source})) print('meter', self.conn.db.meter.remove({'source': self.source})) class EventDataFixture(fixture.GabbiFixture): """Instantiate some sample event data for use in testing.""" def start_fixture(self): """Create some events.""" conf = fixture_config.Config().conf self.conn = storage.get_connection_from_config(conf, 'event') events = [] name_list = ['chocolate.chip', 'peanut.butter', 'sugar'] for ix, name in enumerate(name_list): timestamp = datetime.datetime.utcnow() message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix) traits = [models.Trait('type', 1, name), models.Trait('ate', 2, ix)] event = models.Event(message_id, 'cookies_{}'.format(name), timestamp, traits, {'nested': {'inside': 'value'}}) events.append(event) self.conn.record_events(events) def stop_fixture(self): """Destroy the events.""" self.conn.db.event.remove({'event_type': '/^cookies_/'}) class CORSConfigFixture(fixture.GabbiFixture): """Inject mock configuration for the CORS middleware.""" def start_fixture(self): # Here we monkeypatch GroupAttr.__getattr__, necessary because the # paste.ini method of initializing this middleware creates its own # ConfigOpts instance, bypassing the regular config fixture. def _mock_getattr(instance, key): if key != 'allowed_origin': return self._original_call_method(instance, key) return "http://valid.example.com" self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr def stop_fixture(self): """Remove the monkeypatch.""" cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method ceilometer-6.1.5/ceilometer/tests/functional/gabbi/__init__.py0000664000567000056710000000000013072744703025622 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/test_notification.py0000664000567000056710000006250313072744706026567 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Ceilometer notify daemon.""" import shutil import mock from oslo_config import fixture as fixture_config from oslo_context import context import oslo_messaging import oslo_messaging.conffixture import oslo_service.service from oslo_utils import fileutils from oslo_utils import timeutils import six from stevedore import extension import yaml from ceilometer.compute.notifications import instance from ceilometer import messaging from ceilometer import notification from ceilometer.publisher import test as test_publisher from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } class TestNotification(tests_base.BaseTestCase): def setUp(self): super(TestNotification, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", False, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") self.setup_messaging(self.CONF) self.srv = notification.NotificationService() def fake_get_notifications_manager(self, pm): self.plugin = instance.Instance(pm) return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, self.plugin) ] ) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.MagicMock()) @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') def _do_process_notification_manager_start(self, fake_event_endpoint_class): with mock.patch.object(self.srv, '_get_notifications_manager') as get_nm: get_nm.side_effect = self.fake_get_notifications_manager self.srv.start() self.fake_event_endpoint = fake_event_endpoint_class.return_value def test_start_multiple_listeners(self): urls = ["fake://vhost1", "fake://vhost2"] self.CONF.set_override("messaging_urls", urls, group="notification") self._do_process_notification_manager_start() self.assertEqual(2, 
len(self.srv.listeners)) def test_process_notification(self): self._do_process_notification_manager_start() self.srv.pipeline_manager.pipelines[0] = mock.MagicMock() self.plugin.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertTrue(self.srv.pipeline_manager.publisher.called) def test_process_notification_no_events(self): self._do_process_notification_manager_start() self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertNotEqual(self.fake_event_endpoint, self.srv.listeners[0].dispatcher.endpoints[0]) @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock()) def test_process_notification_with_events(self): self.CONF.set_override("store_events", True, group="notification") self._do_process_notification_manager_start() self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertEqual(self.fake_event_endpoint, self.srv.listeners[0].dispatcher.endpoints[0]) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) @mock.patch('oslo_messaging.get_batch_notification_listener') def test_unique_consumers(self, mock_listener): def fake_get_notifications_manager_dup_targets(pm): plugin = instance.Instance(pm) return extension.ExtensionManager.make_test_instance( [extension.Extension('test', None, None, plugin), extension.Extension('test', None, None, plugin)]) with mock.patch.object(self.srv, '_get_notifications_manager') as get_nm: get_nm.side_effect = fake_get_notifications_manager_dup_targets self.srv.start() self.assertEqual(1, len(mock_listener.call_args_list)) args, kwargs = mock_listener.call_args self.assertEqual(1, len(args[1])) self.assertIsInstance(args[1][0], oslo_messaging.Target) class BaseRealNotification(tests_base.BaseTestCase): def setup_pipeline(self, counter_names): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': counter_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setup_event_pipeline(self, event_names): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': event_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': ['test://'] }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") return ev_pipeline_cfg_file def setUp(self): super(BaseRealNotification, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf # Dummy config file to avoid looking for system config self.CONF([], project='ceilometer', validate_default_values=True) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.expected_samples = 2 self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", True, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) self.expected_events = 1 
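        # A single compute.instance.create.end notification should produce
        # exactly one event through the event pipeline configured below.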
self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.CONF.set_override( "definitions_cfg_file", self.path_get('etc/ceilometer/event_definitions.yaml'), group='event') self.publisher = test_publisher.TestPublisher("") def _check_notification_service(self): self.srv.start() notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") notifier.info(context.RequestContext(), 'compute.instance.create.end', TEST_NOTICE_PAYLOAD) start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: if (len(self.publisher.samples) >= self.expected_samples and len(self.publisher.events) >= self.expected_events): break self.srv.stop() resources = list(set(s.resource_id for s in self.publisher.samples)) self.assertEqual(self.expected_samples, len(self.publisher.samples)) self.assertEqual(self.expected_events, len(self.publisher.events)) self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) class TestRealNotificationReloadablePipeline(BaseRealNotification): def setUp(self): super(TestRealNotificationReloadablePipeline, self).setUp() self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('refresh_event_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 1) self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_pipeline_poller(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.srv.tg = mock.MagicMock() self.srv.start() pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) self.assertIn(pipeline_poller_call, self.srv.tg.add_timer.call_args_list) self.srv.stop() def test_notification_reloaded_pipeline(self): pipeline_cfg_file = self.setup_pipeline(['instance']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.srv.start() pipeline = self.srv.pipe_manager # Modify the collection targets updated_pipeline_cfg_file = self.setup_pipeline(['vcpus', 'disk.root.size']) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) self.srv.refresh_pipeline() self.assertNotEqual(pipeline, self.srv.pipe_manager) self.srv.stop() def test_notification_reloaded_event_pipeline(self): ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.create.start']) self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.CONF.set_override("store_events", True, group="notification") self.srv.start() pipeline = self.srv.event_pipe_manager # Modify the collection targets updated_ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file) self.srv.refresh_pipeline() self.assertNotEqual(pipeline, self.srv.pipe_manager) self.srv.stop() class TestRealNotification(BaseRealNotification): def setUp(self): super(TestRealNotification, self).setUp() self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service_error_topic(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.srv.start() notifier = 
messaging.get_notifier(self.transport, 'compute.vagrant-precise') notifier.error(context.RequestContext(), 'compute.instance.error', TEST_NOTICE_PAYLOAD) start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: if len(self.publisher.events) >= self.expected_events: break self.srv.stop() self.assertEqual(self.expected_events, len(self.publisher.events)) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_disable_non_metrics(self, fake_publisher_cls): self.CONF.set_override("disable_non_metric_meters", True, group="notification") # instance is a not a metric. we should only get back memory self.expected_samples = 1 fake_publisher_cls.return_value = self.publisher self._check_notification_service() self.assertEqual('memory', self.publisher.samples[0].name) @mock.patch.object(oslo_service.service.Service, 'stop') def test_notification_service_start_abnormal(self, mocked): try: self.srv.stop() except Exception: pass self.assertEqual(1, mocked.call_count) class TestRealNotificationHA(BaseRealNotification): def setUp(self): super(TestRealNotificationHA, self).setUp() self.CONF.set_override('workload_partitioning', True, group='notification') self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() @mock.patch('oslo_messaging.get_batch_notification_listener') def test_reset_listener_on_refresh(self, mock_listener): mock_listener.side_effect = [ mock.MagicMock(), # main listener mock.MagicMock(), # pipeline listener mock.MagicMock(), # refresh pipeline listener ] self.srv.start() def _check_listener_targets(): args, kwargs = mock_listener.call_args self.assertEqual(20, len(args[1])) self.assertIsInstance(args[1][0], oslo_messaging.Target) _check_listener_targets() listener = self.srv.pipeline_listener self.srv._configure_pipeline_listener() self.assertIsNot(listener, self.srv.pipeline_listener) _check_listener_targets() self.srv.stop() @mock.patch('oslo_messaging.get_batch_notification_listener') def test_retain_common_targets_on_refresh(self, mock_listener): with mock.patch('ceilometer.coordination.PartitionCoordinator' '.extract_my_subset', return_value=[1, 2]): self.srv.start() listened_before = [target.topic for target in mock_listener.call_args[0][1]] self.assertEqual(4, len(listened_before)) with mock.patch('ceilometer.coordination.PartitionCoordinator' '.extract_my_subset', return_value=[1, 3]): self.srv._refresh_agent(None) listened_after = [target.topic for target in mock_listener.call_args[0][1]] self.assertEqual(4, len(listened_after)) common = set(listened_before) & set(listened_after) for topic in common: self.assertTrue(topic.endswith('1')) self.srv.stop() @mock.patch('oslo_messaging.get_batch_notification_listener') def test_notify_to_relevant_endpoint(self, mock_listener): self.srv.start() targets = mock_listener.call_args[0][1] self.assertIsNotEmpty(targets) endpoints = {} for endpoint in mock_listener.call_args[0][2]: self.assertEqual(1, len(endpoint.publish_context.pipelines)) pipe = list(endpoint.publish_context.pipelines)[0] endpoints[pipe.name] = endpoint notifiers = [] notifiers.extend(self.srv.pipe_manager.transporters[0][2]) notifiers.extend(self.srv.event_pipe_manager.transporters[0][2]) for notifier in notifiers: filter_rule = endpoints[notifier.publisher_id].filter_rule self.assertEqual(True, filter_rule.match(None, 
notifier.publisher_id, None, None, None)) self.srv.stop() @mock.patch('oslo_messaging.Notifier.sample') def test_broadcast_to_relevant_pipes_only(self, mock_notifier): self.srv.start() for endpoint in self.srv.listeners[0].dispatcher.endpoints: if (hasattr(endpoint, 'filter_rule') and not endpoint.filter_rule.match(None, None, 'nonmatching.end', None, None)): continue endpoint.info([{ 'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'nonmatching.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertFalse(mock_notifier.called) for endpoint in self.srv.listeners[0].dispatcher.endpoints: if (hasattr(endpoint, 'filter_rule') and not endpoint.filter_rule.match(None, None, 'compute.instance.create.end', None, None)): continue endpoint.info([{ 'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertTrue(mock_notifier.called) self.assertEqual(3, mock_notifier.call_count) self.assertEqual('pipeline.event', mock_notifier.call_args_list[0][1]['event_type']) self.assertEqual('ceilometer.pipeline', mock_notifier.call_args_list[1][1]['event_type']) self.assertEqual('ceilometer.pipeline', mock_notifier.call_args_list[2][1]['event_type']) self.srv.stop() class TestRealNotificationMultipleAgents(tests_base.BaseTestCase): def setup_pipeline(self, transformers): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': ['instance', 'memory'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': transformers, 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setUp(self): super(TestRealNotificationMultipleAgents, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([], project='ceilometer', validate_default_values=True) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", False, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") self.CONF.set_override('workload_partitioning', True, group='notification') self.CONF.set_override('pipeline_processing_queues', 2, group='notification') self.publisher = test_publisher.TestPublisher("") self.publisher2 = test_publisher.TestPublisher("") def _check_notifications(self, fake_publisher_cls): fake_publisher_cls.side_effect = [self.publisher, self.publisher2] self.srv = notification.NotificationService() self.srv2 = notification.NotificationService() with mock.patch('ceilometer.coordination.PartitionCoordinator' '._get_members', return_value=['harry', 'lloyd']): with mock.patch('uuid.uuid4', return_value='harry'): self.srv.start() with mock.patch('uuid.uuid4', return_value='lloyd'): self.srv2.start() notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") payload1 = TEST_NOTICE_PAYLOAD.copy() payload1['instance_id'] = '0' notifier.info(context.RequestContext(), 'compute.instance.create.end', payload1) payload2 = TEST_NOTICE_PAYLOAD.copy() payload2['instance_id'] = '1' notifier.info(context.RequestContext(), 'compute.instance.create.end', payload2) 
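# NOTE (editorial sketch): with pipeline_processing_queues=2 and the
# six.moves.builtins.hash patch applied just below (hash(x) -> int(x)),
# the two payloads partition deterministically across the two agents.
# A hedged sketch of the assumed bucketing rule; 'queue_for' is a
# hypothetical helper, not ceilometer API:
queue_for = lambda rid, n_queues=2: int(rid) % n_queues  # noqa: E731
assert queue_for(payload1['instance_id']) != queue_for(payload2['instance_id'])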
self.expected_samples = 4 start = timeutils.utcnow() with mock.patch('six.moves.builtins.hash', lambda x: int(x)): while timeutils.delta_seconds(start, timeutils.utcnow()) < 60: if (len(self.publisher.samples + self.publisher2.samples) >= self.expected_samples): break self.srv.stop() self.srv2.stop() self.assertEqual(2, len(self.publisher.samples)) self.assertEqual(2, len(self.publisher2.samples)) self.assertEqual(1, len(set( s.resource_id for s in self.publisher.samples))) self.assertEqual(1, len(set( s.resource_id for s in self.publisher2.samples))) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_no_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline([]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_multiple_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }, { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) ceilometer-6.1.5/ceilometer/tests/functional/test_collector.py0000664000567000056710000002235513072744706026070 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
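# NOTE (editorial sketch): the UDP tests in this module simulate what a
# remote emitter does on the wire: msgpack-encode a sample dict and fire
# it at the collector's UDP endpoint. A minimal sender sketch, assuming
# the collector's default udp_address/udp_port options (port 4952 unless
# overridden); socket and msgpack are imported just below:
def send_sample_udp(sample_dict, host='127.0.0.1', port=4952):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(msgpack.dumps(sample_dict), (host, port))
    finally:
        sock.close()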
import socket import mock import msgpack from oslo_config import fixture as fixture_config import oslo_messaging from oslo_utils import timeutils from oslotest import mockpatch from stevedore import extension from ceilometer import collector from ceilometer import dispatcher from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests import base as tests_base class FakeException(Exception): pass class FakeConnection(object): def create_worker(self, topic, proxy, pool_name): pass class TestCollector(tests_base.BaseTestCase): def setUp(self): super(TestCollector, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.import_opt("connection", "oslo_db.options", group="database") self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override('telemetry_secret', 'not-so-secret', group='publisher') self._setup_messaging() self.counter = sample.Sample( name='foobar', type='bad', unit='F', volume=1, user_id='jd', project_id='ceilometer', resource_id='cat', timestamp=timeutils.utcnow().isoformat(), resource_metadata={}, ).as_dict() self.utf8_msg = utils.meter_message_from_counter( sample.Sample( name=u'test', type=sample.TYPE_CUMULATIVE, unit=u'', volume=1, user_id=u'test', project_id=u'test', resource_id=u'test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={u'name': [([u'TestPublish'])]}, source=u'testsource', ), 'not-so-secret') self.srv = collector.CollectorService() self.useFixture(mockpatch.PatchObject( self.srv.tg, 'add_thread', side_effect=self._dummy_thread_group_add_thread)) @staticmethod def _dummy_thread_group_add_thread(method): method() def _setup_messaging(self, enabled=True): if enabled: self.setup_messaging(self.CONF) else: self.useFixture(mockpatch.Patch( 'ceilometer.messaging.get_transport', return_value=None)) def _setup_fake_dispatcher(self): plugin = mock.MagicMock() fake_dispatcher = extension.ExtensionManager.make_test_instance([ extension.Extension('test', None, None, plugin,), ], propagate_map_exceptions=True) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.load_dispatcher_manager', return_value=(fake_dispatcher, fake_dispatcher))) return plugin def _make_fake_socket(self, sample): def recvfrom(size): # Make the loop stop self.srv.stop() return msgpack.dumps(sample), ('127.0.0.1', 12345) sock = mock.Mock() sock.recvfrom = recvfrom return sock def _verify_udp_socket(self, udp_socket): conf = self.CONF.collector udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) udp_socket.bind.assert_called_once_with((conf.udp_address, conf.udp_port)) def test_record_metering_data(self): mock_dispatcher = self._setup_fake_dispatcher() dps = dispatcher.load_dispatcher_manager() (self.srv.meter_manager, self.srv.manager) = dps self.srv.record_metering_data(None, self.counter) mock_dispatcher.record_metering_data.assert_called_once_with( data=self.counter) def test_udp_receive_base(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() self.counter['source'] = 'mysource' self.counter['counter_name'] = self.counter['name'] self.counter['counter_volume'] = self.counter['volume'] self.counter['counter_type'] = self.counter['type'] self.counter['counter_unit'] = self.counter['unit'] udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket') as mock_socket: mock_socket.return_value = udp_socket self.srv.start() mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) 
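# (editorial aside) UDP payloads carry the legacy 'counter_*' field names,
# which is why the sample dict was mirrored into that shape above; the
# invariant being exercised:
assert self.counter['counter_name'] == self.counter['name']
assert self.counter['counter_volume'] == self.counter['volume']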
self._verify_udp_socket(udp_socket) mock_dispatcher.record_metering_data.assert_called_once_with( self.counter) def test_udp_socket_ipv6(self): self._setup_messaging(False) self.CONF.set_override('udp_address', '::1', group='collector') self._setup_fake_dispatcher() sock = self._make_fake_socket('data') with mock.patch.object(socket, 'socket') as mock_socket: mock_socket.return_value = sock self.srv.start() mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM) def test_udp_receive_storage_error(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() mock_dispatcher.record_metering_data.side_effect = self._raise_error self.counter['source'] = 'mysource' self.counter['counter_name'] = self.counter['name'] self.counter['counter_volume'] = self.counter['volume'] self.counter['counter_type'] = self.counter['type'] self.counter['counter_unit'] = self.counter['unit'] udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): self.srv.start() self._verify_udp_socket(udp_socket) mock_dispatcher.record_metering_data.assert_called_once_with( self.counter) @staticmethod def _raise_error(*args, **kwargs): raise Exception def test_udp_receive_bad_decoding(self): self._setup_messaging(False) self._setup_fake_dispatcher() udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): with mock.patch('msgpack.loads', self._raise_error): self.srv.start() self._verify_udp_socket(udp_socket) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start') @mock.patch.object(collector.CollectorService, 'start_udp') def test_only_udp(self, udp_start, rpc_start): """Check that only UDP is started if messaging transport is unset.""" self._setup_messaging(False) self._setup_fake_dispatcher() udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): self.srv.start() self.assertEqual(0, rpc_start.call_count) self.assertEqual(1, udp_start.call_count) def test_udp_receive_valid_encoding(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() self.data_sent = [] with mock.patch('socket.socket', return_value=self._make_fake_socket(self.utf8_msg)): self.srv.start() self.assertTrue(utils.verify_signature( mock_dispatcher.method_calls[0][1][0], "not-so-secret")) def _test_collector_requeue(self, listener, batch_listener=False): mock_dispatcher = self._setup_fake_dispatcher() self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() mock_dispatcher.record_metering_data.side_effect = Exception('boom') mock_dispatcher.record_events.side_effect = Exception('boom') self.srv.start() endp = getattr(self.srv, listener).dispatcher.endpoints[0] ret = endp.sample([{'ctxt': {}, 'publisher_id': 'pub_id', 'event_type': 'event', 'payload': {}, 'metadata': {}}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.Mock()) @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) def test_collector_sample_requeue(self): self._test_collector_requeue('sample_listener') @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.Mock()) @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) def test_collector_event_requeue(self): self.CONF.set_override('store_events', True, group='notification') self._test_collector_requeue('event_listener') 
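# NOTE (editorial sketch): the two requeue tests above pin down the
# collector's delivery contract: when a dispatcher raises, the endpoint
# returns REQUEUE so oslo.messaging redelivers rather than drops the
# message. A minimal endpoint honouring that contract; 'dispatch' is a
# hypothetical callable standing in for the dispatcher manager:
import oslo_messaging

def sample_endpoint(samples, dispatch):
    try:
        for s in samples:
            dispatch(s)
        return oslo_messaging.NotificationResult.HANDLED
    except Exception:
        return oslo_messaging.NotificationResult.REQUEUE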
ceilometer-6.1.5/ceilometer/tests/functional/storage/0000775000567000056710000000000013072745164024125 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/storage/test_impl_db2.py0000664000567000056710000001442213072744706027232 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2014. All rights reserved # # Authors: Ildiko Vancsa # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_db2.py .. note:: In order to run the tests against another MongoDB server set the environment variable CEILOMETER_TEST_DB2_URL to point to a DB2 server before running the tests. """ import bson import mock from oslo_config import cfg from oslo_utils import timeutils from ceilometer.event.storage import impl_db2 as impl_db2_event from ceilometer.storage import impl_db2 from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.tests import base as test_base class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } actual_capabilities = impl_db2.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_db2_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = impl_db2.Connection.get_storage_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) class ConnectionTest(test_base.BaseTestCase): @mock.patch.object(impl_db2.Connection, '_generate_random_str') @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(bson.objectid, 'ObjectId') def test_upgrade(self, meter_id, timestamp, mongo_connect, _generate_random_str): conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} _generate_random_str.return_value = 'wew' * 247 + 'x' * 3 conn_mock.ceilodb2.resource.index_information.return_value = {} mongo_connect.return_value = conn_mock meter_id.return_value = '54b8860d75bfe43b54e84ce7' timestamp.return_value = 'timestamp' cfg.CONF.set_override('db2nosql_resource_id_maxlen', 256, group='database') impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') resource_id = 'wew' * 247 + 'x' * 3 
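# (editorial aside) the padding rule pinned down by
# test_generate_random_str_with_default_config_len below: repeat the base
# string to fill the requested length, then pad the remainder with 'x'.
# A quick arithmetic check with a 24-char ObjectId string:
base = '54b8860d75bfe43b54e84ce7'
padded = base * (512 // len(base)) + 'x' * (512 % len(base))
assert len(padded) == 512 and padded.endswith('x' * 8)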
conn_mock.ceilodb2.resource.insert_one.assert_called_with( {'_id': resource_id, 'no_key': resource_id}) conn_mock.ceilodb2.meter.insert_one.assert_called_with( {'_id': '54b8860d75bfe43b54e84ce7', 'no_key': '54b8860d75bfe43b54e84ce7', 'timestamp': 'timestamp'}) @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(bson.objectid, 'ObjectId') def test_generate_random_str_with_less_config_len(self, objectid, mongo_connect): fake_str = '54b8860d75bfe43b54e84ce7' conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} mongo_connect.return_value = conn_mock objectid.return_value = fake_str cfg.CONF.set_override('db2nosql_resource_id_maxlen', 20, group='database') conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') rand_str = conn._generate_random_str(20) self.assertEqual(fake_str, rand_str) @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(bson.objectid, 'ObjectId') def test_generate_random_str_with_default_config_len(self, objectid, mongo_connect): fake_str = '54b8860d75bfe43b54e84ce7' conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} mongo_connect.return_value = conn_mock objectid.return_value = fake_str cfg.CONF.set_override('db2nosql_resource_id_maxlen', 512, group='database') conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') rand_str = conn._generate_random_str(512) str_len = len(str(fake_str)) expect_str = fake_str * int(512 / str_len) + 'x' * (512 % str_len) self.assertEqual(expect_str, rand_str) ceilometer-6.1.5/ceilometer/tests/functional/storage/test_storage_scenarios.py0000664000567000056710000044105213072744706031257 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
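# NOTE (editorial): in DBTestBase below, create_and_store_sample declares
# timestamp=datetime.datetime.utcnow() as a default argument. That default
# is evaluated once, at class-definition time, not per call -- harmless
# here because every caller passes an explicit timestamp, but the usual
# safe spelling of the same intent would be:
#
#     def create_and_store_sample(self, timestamp=None, ...):
#         timestamp = timestamp or datetime.datetime.utcnow()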
""" Base classes for DB backend implementation test """ import datetime import operator import mock from oslo_config import cfg from oslo_db import api from oslo_db import exception as dbexc from oslo_utils import timeutils import pymongo import ceilometer from ceilometer.event.storage import models as event_models from ceilometer.publisher import utils from ceilometer import sample from ceilometer import storage from ceilometer.tests import db as tests_db class DBTestBase(tests_db.TestBase): @staticmethod def create_side_effect(method, exception_type, test_exception): def side_effect(*args, **kwargs): if test_exception.pop(): raise exception_type else: return method(*args, **kwargs) return side_effect def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(), metadata=None, name='instance', sample_type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='user-id', project_id='project-id', resource_id='resource-id', source=None): metadata = metadata or {'display_name': 'test-server', 'tag': 'self.counter'} s = sample.Sample( name, sample_type, unit=unit, volume=volume, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=timestamp, resource_metadata=metadata, source=source ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret ) self.conn.record_metering_data(msg) return msg def setUp(self): super(DBTestBase, self).setUp() patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) self.prepare_data() def prepare_data(self): original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42), (2012, 7, 2, 10, 43)] timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44), (2011, 5, 30, 18, 3), (2012, 12, 1, 1, 25), (2012, 2, 29, 6, 59), (2013, 5, 31, 23, 7)] timestamp_list = (original_timestamps + timestamps_for_test_samples_default_order) self.msgs = [] self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(2012, 7, 2, 10, 39), source='test-1') ) self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(*timestamp_list[0]), source='test-1') ) self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(*timestamp_list[1]), resource_id='resource-id-alternate', metadata={'display_name': 'test-server', 'tag': 'self.counter2'}, source='test-2') ) self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(*timestamp_list[2]), resource_id='resource-id-alternate', user_id='user-id-alternate', metadata={'display_name': 'test-server', 'tag': 'self.counter3'}, source='test-3') ) start_idx = 3 end_idx = len(timestamp_list) for i, ts in zip(range(start_idx - 1, end_idx - 1), timestamp_list[start_idx:end_idx]): self.msgs.append( self.create_and_store_sample( timestamp=datetime.datetime(*ts), user_id='user-id-%s' % i, project_id='project-id-%s' % i, resource_id='resource-id-%s' % i, metadata={ 'display_name': 'test-server', 'tag': 'counter-%s' % i }, source='test') ) class ResourceTest(DBTestBase): def prepare_data(self): super(ResourceTest, self).prepare_data() self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(2012, 7, 2, 10, 39), user_id='mongodb_test', resource_id='resource-id-mongo_bad_key', project_id='project-id-test', metadata={'display.name': {'name.$1': 'test-server1', '$name_2': 'test-server2'}, 'tag': 'self.counter'}, source='test-4' )) def test_get_resources(self): 
expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40) msgs_sources = [msg['source'] for msg in self.msgs] resources = list(self.conn.get_resources()) self.assertEqual(10, len(resources)) for resource in resources: if resource.resource_id != 'resource-id': continue self.assertEqual(expected_first_sample_timestamp, resource.first_sample_timestamp) self.assertEqual(expected_last_sample_timestamp, resource.last_sample_timestamp) self.assertEqual('resource-id', resource.resource_id) self.assertEqual('project-id', resource.project_id) self.assertIn(resource.source, msgs_sources) self.assertEqual('user-id', resource.user_id) self.assertEqual('test-server', resource.metadata['display_name']) break else: self.fail('Never found resource-id') def test_get_resources_start_timestamp(self): timestamp = datetime.datetime(2012, 7, 2, 10, 42) expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4', 'resource-id-6', 'resource-id-8']) resources = list(self.conn.get_resources(start_timestamp=timestamp)) resource_ids = [r.resource_id for r in resources] self.assertEqual(expected, set(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=timestamp, start_timestamp_op='ge')) resource_ids = [r.resource_id for r in resources] self.assertEqual(expected, set(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=timestamp, start_timestamp_op='gt')) resource_ids = [r.resource_id for r in resources] expected.remove('resource-id-2') self.assertEqual(expected, set(resource_ids)) def test_get_resources_end_timestamp(self): timestamp = datetime.datetime(2012, 7, 2, 10, 42) expected = set(['resource-id', 'resource-id-alternate', 'resource-id-5', 'resource-id-7', 'resource-id-mongo_bad_key']) resources = list(self.conn.get_resources(end_timestamp=timestamp)) resource_ids = [r.resource_id for r in resources] self.assertEqual(expected, set(resource_ids)) resources = list(self.conn.get_resources(end_timestamp=timestamp, end_timestamp_op='lt')) resource_ids = [r.resource_id for r in resources] self.assertEqual(expected, set(resource_ids)) resources = list(self.conn.get_resources(end_timestamp=timestamp, end_timestamp_op='le')) resource_ids = [r.resource_id for r in resources] expected.add('resource-id-2') self.assertEqual(expected, set(resource_ids)) def test_get_resources_both_timestamps(self): start_ts = datetime.datetime(2012, 7, 2, 10, 42) end_ts = datetime.datetime(2012, 7, 2, 10, 43) resources = list(self.conn.get_resources(start_timestamp=start_ts, end_timestamp=end_ts)) resource_ids = [r.resource_id for r in resources] self.assertEqual(set(['resource-id-2']), set(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=start_ts, end_timestamp=end_ts, start_timestamp_op='ge', end_timestamp_op='lt')) resource_ids = [r.resource_id for r in resources] self.assertEqual(set(['resource-id-2']), set(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=start_ts, end_timestamp=end_ts, start_timestamp_op='gt', end_timestamp_op='lt')) resource_ids = [r.resource_id for r in resources] self.assertEqual(0, len(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=start_ts, end_timestamp=end_ts, start_timestamp_op='gt', end_timestamp_op='le')) resource_ids = [r.resource_id for r in resources] self.assertEqual(set(['resource-id-3']), set(resource_ids)) resources = list(self.conn.get_resources(start_timestamp=start_ts, end_timestamp=end_ts, 
start_timestamp_op='ge', end_timestamp_op='le')) resource_ids = [r.resource_id for r in resources] self.assertEqual(set(['resource-id-2', 'resource-id-3']), set(resource_ids)) def test_get_resources_by_source(self): resources = list(self.conn.get_resources(source='test-1')) self.assertEqual(1, len(resources)) ids = set(r.resource_id for r in resources) self.assertEqual(set(['resource-id']), ids) def test_get_resources_by_user(self): resources = list(self.conn.get_resources(user='user-id')) self.assertTrue(len(resources) == 2 or len(resources) == 1) ids = set(r.resource_id for r in resources) # tolerate storage driver only reporting latest owner of resource resources_ever_owned_by = set(['resource-id', 'resource-id-alternate']) resources_now_owned_by = set(['resource-id']) self.assertTrue(ids == resources_ever_owned_by or ids == resources_now_owned_by, 'unexpected resources: %s' % ids) def test_get_resources_by_alternate_user(self): resources = list(self.conn.get_resources(user='user-id-alternate')) self.assertEqual(1, len(resources)) # only a single resource owned by this user ever self.assertEqual('resource-id-alternate', resources[0].resource_id) def test_get_resources_by_project(self): resources = list(self.conn.get_resources(project='project-id')) self.assertEqual(2, len(resources)) ids = set(r.resource_id for r in resources) self.assertEqual(set(['resource-id', 'resource-id-alternate']), ids) def test_get_resources_by_metaquery(self): q = {'metadata.display_name': 'test-server'} resources = list(self.conn.get_resources(metaquery=q)) self.assertEqual(9, len(resources)) def test_get_resources_by_metaquery_key_with_dot_in_metadata(self): q = {'metadata.display.name.$name_2': 'test-server2', 'metadata.display.name.name.$1': 'test-server1'} resources = list(self.conn.get_resources(metaquery=q)) self.assertEqual(1, len(resources)) def test_get_resources_by_empty_metaquery(self): resources = list(self.conn.get_resources(metaquery={})) self.assertEqual(10, len(resources)) def test_get_resources_most_recent_metadata_all(self): resources = self.conn.get_resources() expected_tags = ['self.counter', 'self.counter3', 'counter-2', 'counter-3', 'counter-4', 'counter-5', 'counter-6', 'counter-7', 'counter-8'] for resource in resources: self.assertIn(resource.metadata['tag'], expected_tags) def test_get_resources_most_recent_metadata_single(self): resource = list( self.conn.get_resources(resource='resource-id-alternate') )[0] expected_tag = 'self.counter3' self.assertEqual(expected_tag, resource.metadata['tag']) class ResourceTestOrdering(DBTestBase): def prepare_data(self): sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43), (2013, 8, 10, 10, 44), (2013, 8, 10, 10, 42), (2013, 8, 10, 10, 49), (2013, 8, 10, 10, 47)]), ('resource-id-2', [(2013, 8, 10, 10, 43), (2013, 8, 10, 10, 48), (2013, 8, 10, 10, 42), (2013, 8, 10, 10, 48), (2013, 8, 10, 10, 47)]), ('resource-id-3', [(2013, 8, 10, 10, 43), (2013, 8, 10, 10, 44), (2013, 8, 10, 10, 50), (2013, 8, 10, 10, 49), (2013, 8, 10, 10, 47)])] counter = 0 for resource, timestamps in sample_timings: for timestamp in timestamps: self.create_and_store_sample( timestamp=datetime.datetime(*timestamp), resource_id=resource, user_id=str(counter % 2), project_id=str(counter % 3), metadata={ 'display_name': 'test-server', 'tag': 'sample-%s' % counter }, source='test' ) counter += 1 def test_get_resources_ordering_all(self): resources = list(self.conn.get_resources()) expected = set([ ('resource-id-1', 'sample-3'), ('resource-id-2', 'sample-8'), 
('resource-id-3', 'sample-12') ]) received = set([(r.resource_id, r.metadata['tag']) for r in resources]) self.assertEqual(expected, received) def test_get_resources_ordering_single(self): resource = list(self.conn.get_resources(resource='resource-id-2'))[0] self.assertEqual('resource-id-2', resource.resource_id) self.assertEqual('sample-8', resource.metadata['tag']) class MeterTest(DBTestBase): def test_get_meters(self): msgs_sources = [msg['source'] for msg in self.msgs] results = list(self.conn.get_meters()) self.assertEqual(9, len(results)) for meter in results: self.assertIn(meter.source, msgs_sources) def test_get_meters_by_user(self): results = list(self.conn.get_meters(user='user-id')) self.assertEqual(1, len(results)) def test_get_meters_by_project(self): results = list(self.conn.get_meters(project='project-id')) self.assertEqual(2, len(results)) def test_get_meters_by_metaquery(self): q = {'metadata.display_name': 'test-server'} results = list(self.conn.get_meters(metaquery=q)) self.assertIsNotEmpty(results) self.assertEqual(9, len(results)) def test_get_meters_by_empty_metaquery(self): results = list(self.conn.get_meters(metaquery={})) self.assertEqual(9, len(results)) class RawSampleTest(DBTestBase): def prepare_data(self): super(RawSampleTest, self).prepare_data() self.msgs.append(self.create_and_store_sample( timestamp=datetime.datetime(2012, 7, 2, 10, 39), user_id='mongodb_test', resource_id='resource-id-mongo_bad_key', project_id='project-id-test', metadata={'display.name': {'name.$1': 'test-server1', '$name_2': 'test-server2'}, 'tag': 'self.counter'}, source='test-4' )) def test_get_sample_counter_volume(self): # NOTE(idegtiarov) Because wsme expected a float type of data this test # checks type of counter_volume received from database. 
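# (editorial aside) storage.SampleFilter is the query object every test in
# this class builds; a hedged usage sketch against the same connection
# (names chosen so they cannot clash with the test body below):
example_filter = storage.SampleFilter(user='user-id', meter='instance')
assert list(self.conn.get_samples(example_filter, limit=1))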
f = storage.SampleFilter() result = next(self.conn.get_samples(f, limit=1)) self.assertIsInstance(result.counter_volume, float) def test_get_samples_limit_zero(self): f = storage.SampleFilter() results = list(self.conn.get_samples(f, limit=0)) self.assertEqual(0, len(results)) def test_get_samples_limit(self): f = storage.SampleFilter() results = list(self.conn.get_samples(f, limit=3)) self.assertEqual(3, len(results)) for result in results: self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at) def test_get_samples_in_default_order(self): f = storage.SampleFilter() prev_timestamp = None for sample_item in self.conn.get_samples(f): if prev_timestamp is not None: self.assertTrue(prev_timestamp >= sample_item.timestamp) prev_timestamp = sample_item.timestamp def test_get_samples_by_user(self): f = storage.SampleFilter(user='user-id') results = list(self.conn.get_samples(f)) self.assertEqual(3, len(results)) for meter in results: d = meter.as_dict() self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) del d['recorded_at'] self.assertIn(d, self.msgs[:3]) def test_get_samples_by_user_limit(self): f = storage.SampleFilter(user='user-id') results = list(self.conn.get_samples(f, limit=1)) self.assertEqual(1, len(results)) def test_get_samples_by_user_limit_bigger(self): f = storage.SampleFilter(user='user-id') results = list(self.conn.get_samples(f, limit=42)) self.assertEqual(3, len(results)) def test_get_samples_by_project(self): f = storage.SampleFilter(project='project-id') results = list(self.conn.get_samples(f)) self.assertIsNotNone(results) for meter in results: d = meter.as_dict() self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) del d['recorded_at'] self.assertIn(d, self.msgs[:4]) def test_get_samples_by_resource(self): f = storage.SampleFilter(user='user-id', resource='resource-id') results = list(self.conn.get_samples(f)) self.assertEqual(2, len(results)) d = results[1].as_dict() self.assertEqual(timeutils.utcnow(), d['recorded_at']) del d['recorded_at'] self.assertEqual(self.msgs[0], d) def test_get_samples_by_metaquery(self): q = {'metadata.display_name': 'test-server'} f = storage.SampleFilter(metaquery=q) results = list(self.conn.get_samples(f)) self.assertIsNotNone(results) for meter in results: d = meter.as_dict() self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) del d['recorded_at'] self.assertIn(d, self.msgs) def test_get_samples_by_metaquery_key_with_dot_in_metadata(self): q = {'metadata.display.name.name.$1': 'test-server1', 'metadata.display.name.$name_2': 'test-server2'} f = storage.SampleFilter(metaquery=q) results = list(self.conn.get_samples(f)) self.assertIsNotNone(results) self.assertEqual(1, len(results)) def test_get_samples_by_start_time(self): timestamp = datetime.datetime(2012, 7, 2, 10, 41) f = storage.SampleFilter( user='user-id', start_timestamp=timestamp, ) results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) self.assertEqual(timestamp, results[0].timestamp) f.start_timestamp_op = 'ge' results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) self.assertEqual(timestamp, results[0].timestamp) f.start_timestamp_op = 'gt' results = list(self.conn.get_samples(f)) self.assertEqual(0, len(results)) def test_get_samples_by_end_time(self): timestamp = datetime.datetime(2012, 7, 2, 10, 40) f = storage.SampleFilter( user='user-id', end_timestamp=timestamp, ) results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) f.end_timestamp_op = 'lt' results = 
list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) f.end_timestamp_op = 'le' results = list(self.conn.get_samples(f)) self.assertEqual(2, len(results)) self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39), results[1].timestamp) def test_get_samples_by_both_times(self): start_ts = datetime.datetime(2012, 7, 2, 10, 42) end_ts = datetime.datetime(2012, 7, 2, 10, 43) f = storage.SampleFilter( start_timestamp=start_ts, end_timestamp=end_ts, ) results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) self.assertEqual(start_ts, results[0].timestamp) f.start_timestamp_op = 'gt' f.end_timestamp_op = 'lt' results = list(self.conn.get_samples(f)) self.assertEqual(0, len(results)) f.start_timestamp_op = 'ge' f.end_timestamp_op = 'lt' results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) self.assertEqual(start_ts, results[0].timestamp) f.start_timestamp_op = 'gt' f.end_timestamp_op = 'le' results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) self.assertEqual(end_ts, results[0].timestamp) f.start_timestamp_op = 'ge' f.end_timestamp_op = 'le' results = list(self.conn.get_samples(f)) self.assertEqual(2, len(results)) self.assertEqual(end_ts, results[0].timestamp) self.assertEqual(start_ts, results[1].timestamp) def test_get_samples_by_name(self): f = storage.SampleFilter(user='user-id', meter='no-such-meter') results = list(self.conn.get_samples(f)) self.assertIsEmpty(results) def test_get_samples_by_name2(self): f = storage.SampleFilter(user='user-id', meter='instance') results = list(self.conn.get_samples(f)) self.assertIsNotEmpty(results) def test_get_samples_by_source(self): f = storage.SampleFilter(source='test-1') results = list(self.conn.get_samples(f)) self.assertEqual(2, len(results)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') def test_clear_metering_data(self): # NOTE(jd) Override this test in MongoDB because our code doesn't clear # the collections, this is handled by MongoDB TTL feature. self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) self.conn.clear_expired_metering_data(3 * 60) f = storage.SampleFilter(meter='instance') results = list(self.conn.get_samples(f)) self.assertEqual(5, len(results)) results = list(self.conn.get_resources()) self.assertEqual(5, len(results)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2') def test_clear_metering_data_no_data_to_remove(self): # NOTE(jd) Override this test in MongoDB because our code doesn't clear # the collections, this is handled by MongoDB TTL feature. 
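# (editorial sketch) clear_expired_metering_data(ttl) drops samples older
# than the mocked utcnow() minus ttl. With the 2010 clock set just below,
# the cutoff predates every stored sample (the earliest is 2011-05-30), so
# all 12 'instance' samples and all 10 resources must survive:
cutoff = datetime.datetime(2010, 7, 2, 10, 45) - datetime.timedelta(seconds=3 * 60)
assert cutoff < datetime.datetime(2011, 5, 30, 18, 3)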
self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45) self.conn.clear_expired_metering_data(3 * 60) f = storage.SampleFilter(meter='instance') results = list(self.conn.get_samples(f)) self.assertEqual(12, len(results)) results = list(self.conn.get_resources()) self.assertEqual(10, len(results)) @tests_db.run_with('sqlite', 'mysql', 'pgsql') def test_clear_metering_data_expire_samples_only(self): cfg.CONF.set_override('sql_expire_samples_only', True) self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) self.conn.clear_expired_metering_data(4 * 60) f = storage.SampleFilter(meter='instance') results = list(self.conn.get_samples(f)) self.assertEqual(7, len(results)) results = list(self.conn.get_resources()) self.assertEqual(6, len(results)) @tests_db.run_with('sqlite', 'mysql', 'pgsql') def test_record_metering_data_retry_success_on_deadlock(self): raise_deadlock = [False, True] self.CONF.set_override('max_retries', 2, group='database') s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='user_id', project_id='project_id', resource_id='resource_id', timestamp=datetime.datetime.utcnow(), resource_metadata={'display_name': 'test-server', 'tag': 'self.counter'}, source=None) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret ) mock_resource_create = mock.patch.object(self.conn, "_create_resource") mock_resource_create.side_effect = self.create_side_effect( self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) with mock.patch.object(api.time, 'sleep') as retry_sleep: self.conn.record_metering_data(msg) self.assertEqual(1, retry_sleep.call_count) f = storage.SampleFilter(meter='instance') results = list(self.conn.get_samples(f)) self.assertEqual(13, len(results)) @tests_db.run_with('sqlite', 'mysql', 'pgsql') def test_record_metering_data_retry_failure_on_deadlock(self): raise_deadlock = [True, True, True] self.CONF.set_override('max_retries', 3, group='database') s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='user_id', project_id='project_id', resource_id='resource_id', timestamp=datetime.datetime.utcnow(), resource_metadata={'display_name': 'test-server', 'tag': 'self.counter'}, source=None) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret ) mock_resource_create = mock.patch.object(self.conn, "_create_resource") mock_resource_create.side_effect = self.create_side_effect( self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) with mock.patch.object(api.time, 'sleep') as retry_sleep: try: self.conn.record_metering_data(msg) except dbexc.DBError as err: self.assertIn('DBDeadlock', str(type(err))) self.assertEqual(3, retry_sleep.call_count) class ComplexSampleQueryTest(DBTestBase): def setUp(self): super(ComplexSampleQueryTest, self).setUp() self.complex_filter = { "and": [{"or": [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-44"}}]}, {"and": [{"=": {"counter_name": "cpu_util"}}, {"and": [{">": {"counter_volume": 0.4}}, {"not": {">": {"counter_volume": 0.8}}}]}]}]} or_expression = [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-43"}}, {"=": {"resource_id": "resource-id-44"}}] and_expression = [{">": {"counter_volume": 0.4}}, {"not": {">": {"counter_volume": 0.8}}}] self.complex_filter_list = {"and": [{"or": or_expression}, {"and": [{"=": {"counter_name": "cpu_util"}}, {"and": and_expression}]}]} in_expression = {"in": {"resource_id": ["resource-id-42", "resource-id-43", 
"resource-id-44"]}} self.complex_filter_in = {"and": [in_expression, {"and": [{"=": {"counter_name": "cpu_util"}}, {"and": and_expression}]}]} def _create_samples(self): for resource in range(42, 45): for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: metadata = {'a_string_key': "meta-value" + str(volume), 'a_float_key': volume, 'an_int_key': resource, 'a_bool_key': (resource == 43)} self.create_and_store_sample(resource_id="resource-id-%s" % resource, metadata=metadata, name="cpu_util", volume=volume) def test_no_filter(self): results = list(self.conn.query_samples()) self.assertEqual(len(self.msgs), len(results)) for sample_item in results: d = sample_item.as_dict() del d['recorded_at'] self.assertIn(d, self.msgs) def test_query_complex_filter_with_regexp(self): self._create_samples() complex_regex_filter = {"and": [ {"=~": {"resource_id": "resource-id.*"}}, {"=": {"counter_volume": 0.4}}]} results = list( self.conn.query_samples(filter_expr=complex_regex_filter)) self.assertEqual(3, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-43", "resource-id-44"])) def test_query_complex_filter_with_regexp_metadata(self): self._create_samples() complex_regex_filter = {"and": [ {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, {"=": {"counter_volume": 0.4}}]} results = list( self.conn.query_samples(filter_expr=complex_regex_filter)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("meta-value0.4", sample_item.resource_metadata['a_string_key']) def test_no_filter_with_zero_limit(self): limit = 0 results = list(self.conn.query_samples(limit=limit)) self.assertEqual(limit, len(results)) def test_no_filter_with_limit(self): limit = 3 results = list(self.conn.query_samples(limit=limit)) self.assertEqual(limit, len(results)) def test_query_simple_filter(self): simple_filter = {"=": {"resource_id": "resource-id-8"}} results = list(self.conn.query_samples(filter_expr=simple_filter)) self.assertEqual(1, len(results)) for sample_item in results: self.assertEqual("resource-id-8", sample_item.resource_id) def test_query_simple_filter_with_not_equal_relation(self): simple_filter = {"!=": {"resource_id": "resource-id-8"}} results = list(self.conn.query_samples(filter_expr=simple_filter)) self.assertEqual(len(self.msgs) - 1, len(results)) for sample_item in results: self.assertNotEqual("resource-id-8", sample_item.resource_id) def test_query_complex_filter(self): self._create_samples() results = list(self.conn.query_samples(filter_expr=( self.complex_filter))) self.assertEqual(6, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-44"])) self.assertEqual("cpu_util", sample_item.counter_name) self.assertTrue(sample_item.counter_volume > 0.4) self.assertTrue(sample_item.counter_volume <= 0.8) def test_query_complex_filter_with_limit(self): self._create_samples() limit = 3 results = list(self.conn.query_samples(filter_expr=self.complex_filter, limit=limit)) self.assertEqual(limit, len(results)) def test_query_complex_filter_with_simple_orderby(self): self._create_samples() expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] orderby = [{"counter_volume": "asc"}] results = list(self.conn.query_samples(filter_expr=self.complex_filter, orderby=orderby)) self.assertEqual(expected_volume_order, [s.counter_volume for s in results]) def test_query_complex_filter_with_complex_orderby(self): self._create_samples() expected_volume_order = [0.41, 
0.41, 0.79, 0.79, 0.8, 0.8] expected_resource_id_order = ["resource-id-44", "resource-id-42", "resource-id-44", "resource-id-42", "resource-id-44", "resource-id-42"] orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] results = list(self.conn.query_samples(filter_expr=self.complex_filter, orderby=orderby)) self.assertEqual(expected_volume_order, [s.counter_volume for s in results]) self.assertEqual(expected_resource_id_order, [s.resource_id for s in results]) def test_query_complex_filter_with_list(self): self._create_samples() results = list( self.conn.query_samples(filter_expr=self.complex_filter_list)) self.assertEqual(9, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-43", "resource-id-44"])) self.assertEqual("cpu_util", sample_item.counter_name) self.assertTrue(sample_item.counter_volume > 0.4) self.assertTrue(sample_item.counter_volume <= 0.8) def test_query_complex_filter_with_list_with_limit(self): self._create_samples() limit = 3 results = list( self.conn.query_samples(filter_expr=self.complex_filter_list, limit=limit)) self.assertEqual(limit, len(results)) def test_query_complex_filter_with_list_with_simple_orderby(self): self._create_samples() expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, 0.79, 0.8, 0.8, 0.8] orderby = [{"counter_volume": "asc"}] results = list( self.conn.query_samples(filter_expr=self.complex_filter_list, orderby=orderby)) self.assertEqual(expected_volume_order, [s.counter_volume for s in results]) def test_query_complex_filterwith_list_with_complex_orderby(self): self._create_samples() expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, 0.79, 0.8, 0.8, 0.8] expected_resource_id_order = ["resource-id-44", "resource-id-43", "resource-id-42", "resource-id-44", "resource-id-43", "resource-id-42", "resource-id-44", "resource-id-43", "resource-id-42"] orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] results = list( self.conn.query_samples(filter_expr=self.complex_filter_list, orderby=orderby)) self.assertEqual(expected_volume_order, [s.counter_volume for s in results]) self.assertEqual(expected_resource_id_order, [s.resource_id for s in results]) def test_query_complex_filter_with_wrong_order_in_orderby(self): self._create_samples() orderby = [{"counter_volume": "not valid order"}, {"resource_id": "desc"}] query = lambda: list(self.conn.query_samples(filter_expr=( self.complex_filter), orderby=orderby)) self.assertRaises(KeyError, query) def test_query_complex_filter_with_in(self): self._create_samples() results = list( self.conn.query_samples(filter_expr=self.complex_filter_in)) self.assertEqual(9, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-43", "resource-id-44"])) self.assertEqual("cpu_util", sample_item.counter_name) self.assertTrue(sample_item.counter_volume > 0.4) self.assertTrue(sample_item.counter_volume <= 0.8) def test_query_simple_metadata_filter(self): self._create_samples() filter_expr = {"=": {"resource_metadata.a_bool_key": True}} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(6, len(results)) for sample_item in results: self.assertTrue(sample_item.resource_metadata["a_bool_key"]) def test_query_simple_metadata_with_in_op(self): self._create_samples() filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(12, len(results)) for sample_item in results: 
self.assertIn(sample_item.resource_metadata["an_int_key"], [42, 43]) def test_query_complex_metadata_filter(self): self._create_samples() subfilter = {"or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]} filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, subfilter]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(8, len(results)) for sample_item in results: self.assertTrue((sample_item.resource_metadata["a_string_key"] == "meta-value0.81" or sample_item.resource_metadata["a_float_key"] <= 0.41)) self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) def test_query_mixed_data_and_metadata_filter(self): self._create_samples() subfilter = {"or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]} filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}}, subfilter]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertTrue((sample_item.resource_metadata["a_string_key"] == "meta-value0.81" or sample_item.resource_metadata["a_float_key"] <= 0.41)) self.assertEqual("resource-id-42", sample_item.resource_id) def test_query_non_existing_metadata_with_result(self): self._create_samples() filter_expr = { "or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.key_not_exists": 0.41}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("meta-value0.81", sample_item.resource_metadata["a_string_key"]) def test_query_non_existing_metadata_without_result(self): self._create_samples() filter_expr = { "or": [{"=": {"resource_metadata.key_not_exists": "meta-value0.81"}}, {"<=": {"resource_metadata.key_not_exists": 0.41}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(0, len(results)) def test_query_negated_metadata(self): self._create_samples() filter_expr = { "and": [{"=": {"resource_id": "resource-id-42"}}, {"not": {"or": [{">": {"resource_metadata.an_int_key": 43}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("resource-id-42", sample_item.resource_id) self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) self.assertTrue(sample_item.resource_metadata["a_float_key"] > 0.41) def test_query_negated_complex_expression(self): self._create_samples() filter_expr = { "and": [{"=": {"counter_name": "cpu_util"}}, {"not": {"or": [{"or": [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-44"}}]}, {"and": [{">": {"counter_volume": 0.4}}, {"<": {"counter_volume": 0.8}}]}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) self.assertEqual("cpu_util", sample_item.counter_name) def test_query_with_double_negation(self): self._create_samples() filter_expr = { "and": [{"=": {"counter_name": "cpu_util"}}, {"not": {"or": [{"or": [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-44"}}]}, {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, {"<": {"counter_volume": 
0.8}}]}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) self.assertEqual("cpu_util", sample_item.counter_name) def test_query_negate_not_equal(self): self._create_samples() filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(6, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) def test_query_negated_in_op(self): self._create_samples() filter_expr = { "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, {"=": {"resource_id": "resource-id-42"}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertIn(sample_item.counter_volume, [0.41, 0.8, 0.81]) class StatisticsTest(DBTestBase): def prepare_data(self): for i in range(3): c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) for i in range(3): c = sample.Sample( 'volume.size', 'gauge', 'GiB', 8 + i, 'user-5', 'project2', 'resource-6', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) for i in range(3): c = sample.Sample( 'memory', 'gauge', 'MB', 8 + i, 'user-5', 'project2', 'resource-6', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={}, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) def test_by_meter(self): f = storage.SampleFilter( meter='memory' ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('MB', results.unit) self.assertEqual(8, results.min) self.assertEqual(10, results.max) self.assertEqual(27, results.sum) self.assertEqual(9, results.avg) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), results.period_start) self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), results.period_end) def test_by_user(self): f = storage.SampleFilter( user='user-5', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('GiB', results.unit) self.assertEqual(8, results.min) self.assertEqual(10, results.max) self.assertEqual(27, results.sum) self.assertEqual(9, results.avg) def test_no_period_in_query(self): f = storage.SampleFilter( user='user-5', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual(0, results.period) def test_period_is_int(self): f = storage.SampleFilter( meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertIs(int, type(results.period)) 
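# (editorial sketch, ahead of the period tests below) period buckets are
# anchored at the query start_timestamp: a sample falls in the bucket
# start + period * floor((ts - start) / period). With start=10:28 and
# period=7200s, the 11:31 sample lands in the 10:28-12:28 bucket:
start = datetime.datetime(2012, 9, 25, 10, 28)
ts = datetime.datetime(2012, 9, 25, 11, 31)
offset = int((ts - start).total_seconds() // 7200)
assert start + datetime.timedelta(seconds=7200 * offset) == start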
self.assertEqual(6, results.count) def test_by_user_period(self): f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp='2012-09-25T10:28:00', ) results = list(self.conn.get_meter_statistics(f, period=7200)) self.assertEqual(2, len(results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), datetime.datetime(2012, 9, 25, 12, 28)]), set(r.period_start for r in results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), datetime.datetime(2012, 9, 25, 14, 28)]), set(r.period_end for r in results)) r = results[0] self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), r.period_start) self.assertEqual(2, r.count) self.assertEqual('GiB', r.unit) self.assertEqual(8.5, r.avg) self.assertEqual(8, r.min) self.assertEqual(9, r.max) self.assertEqual(17, r.sum) self.assertEqual(7200, r.period) self.assertIsInstance(r.period, int) expected_end = r.period_start + datetime.timedelta(seconds=7200) self.assertEqual(expected_end, r.period_end) self.assertEqual(3660, r.duration) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_start) self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), r.duration_end) def test_by_user_period_with_timezone(self): dates = [ '2012-09-25T00:28:00-10:00', '2012-09-25T01:28:00-09:00', '2012-09-25T02:28:00-08:00', '2012-09-25T03:28:00-07:00', '2012-09-25T04:28:00-06:00', '2012-09-25T05:28:00-05:00', '2012-09-25T06:28:00-04:00', '2012-09-25T07:28:00-03:00', '2012-09-25T08:28:00-02:00', '2012-09-25T09:28:00-01:00', '2012-09-25T10:28:00Z', '2012-09-25T11:28:00+01:00', '2012-09-25T12:28:00+02:00', '2012-09-25T13:28:00+03:00', '2012-09-25T14:28:00+04:00', '2012-09-25T15:28:00+05:00', '2012-09-25T16:28:00+06:00', '2012-09-25T17:28:00+07:00', '2012-09-25T18:28:00+08:00', '2012-09-25T19:28:00+09:00', '2012-09-25T20:28:00+10:00', '2012-09-25T21:28:00+11:00', '2012-09-25T22:28:00+12:00', ] for date in dates: f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp=date ) results = list(self.conn.get_meter_statistics(f, period=7200)) self.assertEqual(2, len(results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), datetime.datetime(2012, 9, 25, 12, 28)]), set(r.period_start for r in results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), datetime.datetime(2012, 9, 25, 14, 28)]), set(r.period_end for r in results)) def test_by_user_period_start_end(self): f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp='2012-09-25T10:28:00', end_timestamp='2012-09-25T11:28:00', ) results = list(self.conn.get_meter_statistics(f, period=1800)) self.assertEqual(1, len(results)) r = results[0] self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), r.period_start) self.assertEqual(1, r.count) self.assertEqual('GiB', r.unit) self.assertEqual(8, r.avg) self.assertEqual(8, r.min) self.assertEqual(8, r.max) self.assertEqual(8, r.sum) self.assertEqual(1800, r.period) self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), r.period_end) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_start) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_end) def test_by_project(self): f = storage.SampleFilter( meter='volume.size', resource='resource-id', start_timestamp='2012-09-25T11:30:00', end_timestamp='2012-09-25T11:32:00', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual(0, results.duration) self.assertEqual(1, results.count) self.assertEqual('GiB', results.unit) 
self.assertEqual(6, results.min) self.assertEqual(6, results.max) self.assertEqual(6, results.sum) self.assertEqual(6, results.avg) def test_one_resource(self): f = storage.SampleFilter( user='user-id', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('GiB', results.unit) self.assertEqual(5, results.min) self.assertEqual(7, results.max) self.assertEqual(18, results.sum) self.assertEqual(6, results.avg) def test_with_no_sample(self): f = storage.SampleFilter( user='user-not-exists', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f, period=1800)) self.assertEqual([], results) class StatisticsGroupByTest(DBTestBase): def prepare_data(self): test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source-2', 'metadata_instance_type': '84'}, {'volume': 2, 'user': 'user-1', 'project': 'project-2', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source-2', 'metadata_instance_type': '83'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 2, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '84'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 4, 'user': 'user-3', 'project': 'project-1', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-3', 'metadata_instance_type': '83'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_CUMULATIVE, unit='s', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], 'instance_type': test_sample['metadata_instance_type']}, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_group_by_user(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id']), groupby_keys_set) 
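# NOTE: a hypothetical sketch of the group-by semantics these tests expect.
# Real drivers push the aggregation into the database, but the result is
# equivalent to bucketing samples by the group-by key and reducing each
# bucket to count/min/max/sum/avg:
from collections import defaultdict


def group_stats(samples, key):
    """Aggregate count/min/max/sum/avg of 'volume' per value of `key`."""
    groups = defaultdict(list)
    for s in samples:
        groups[s[key]].append(s['volume'])
    return dict((k, {'count': len(v), 'min': min(v), 'max': max(v),
                     'sum': sum(v), 'avg': sum(v) / float(len(v))})
                for k, v in groups.items())

# Mirrors test_group_by_user: user-1 has volumes [2, 2] -> sum 4, avg 2.
stats = group_stats([{'user_id': 'user-1', 'volume': 2},
                     {'user_id': 'user-1', 'volume': 2},
                     {'user_id': 'user-3', 'volume': 4}], 'user_id')
assert stats['user-1'] == {'count': 2, 'min': 2, 'max': 2, 'sum': 4,
                           'avg': 2.0}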
self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2'}: self.assertEqual(4, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(8, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_resource(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-2'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_project(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(5, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(10, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_source(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['source'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['source']), groupby_keys_set) self.assertEqual(set(['source-1', 'source-2', 'source-3']), groupby_vals_set) for r in results: if r.groupby == {'source': 'source-1'}: self.assertEqual(4, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) 
self.assertEqual(4, r.max) self.assertEqual(8, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'source': 'source-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'source': 'source-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_unknown_field(self): f = storage.SampleFilter( meter='instance', ) # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list # whereas the SQLAlchemy get_meter_statistics() returns a generator. # You have to apply list() to the SQLAlchemy generator to get it to # throw an error. The MongoDB get_meter_statistics() will throw an # error before list() is called. By using lambda, we can cover both # MongoDB and SQLAlchemy in a single test. self.assertRaises( ceilometer.NotImplementedError, lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) ) def test_group_by_metadata(self): # This test checks grouping by a single metadata field # (now only resource_metadata.instance_type is available). f = storage.SampleFilter( meter='instance', ) results = list( self.conn.get_meter_statistics( f, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'resource_metadata.instance_type': '82'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_metadata.instance_type': '83'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) elif r.groupby == {'resource_metadata.instance_type': '84'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) def test_group_by_multiple_regular(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['user_id', 'resource_id'])) self.assertEqual(4, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_id': 'resource-1'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby 
== {'user_id': 'user-2', 'resource_id': 'resource-2'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3', 'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'user_id': 'user-1', 'resource_id': 'resource-2'}, r.groupby) self.assertNotEqual({'user_id': 'user-1', 'resource_id': 'resource-3'}, r.groupby) self.assertNotEqual({'user_id': 'user-2', 'resource_id': 'resource-3'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_id': 'resource-1'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_id': 'resource-2'}, r.groupby, ) def test_group_by_multiple_metadata(self): # TODO(terriyu): test_group_by_multiple_metadata needs to be # implemented. # This test should check grouping by multiple metadata fields. pass def test_group_by_multiple_regular_metadata(self): # This test checks grouping by a combination of regular and # metadata fields. f = storage.SampleFilter( meter='instance', ) results = list( self.conn.get_meter_statistics( f, groupby=['user_id', 'resource_metadata.instance_type'])) self.assertEqual(5, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1', 'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-1', 'resource_metadata.instance_type': '84'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_metadata.instance_type': '82'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_metadata.instance_type': '84'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3', 'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'user_id': 'user-1', 'resource_metadata.instance_type': '82'}, r.groupby) self.assertNotEqual({'user_id': 'user-2', 'resource_metadata.instance_type': '83'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_metadata.instance_type': '82'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_metadata.instance_type': '84'}, r.groupby) def test_group_by_with_query_filter(self): f = storage.SampleFilter( meter='instance', project='project-1', ) results = 
list(self.conn.get_meter_statistics( f, groupby=['resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_metadata_with_query_filter(self): # This test checks grouping by a metadata field in combination # with a query filter. f = storage.SampleFilter( meter='instance', project='project-1', ) results = list(self.conn.get_meter_statistics( f, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'resource_metadata.instance_type': '82'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) elif r.groupby == {'resource_metadata.instance_type': '84'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) def test_group_by_with_query_filter_multiple(self): f = storage.SampleFilter( meter='instance', user='user-2', source='source-1', ) results = list(self.conn.get_meter_statistics( f, groupby=['project_id', 'resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2', 'resource-1', 'resource-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1', 'resource_id': 'resource-1'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-1', 'resource_id': 'resource-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) 
self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'project_id': 'project-2', 'resource_id': 'resource-2'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'project_id': 'project-2', 'resource_id': 'resource-1'}, r.groupby) def test_group_by_metadata_with_query_filter_multiple(self): # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple # needs to be implemented. # This test should check grouping by multiple metadata fields in # combination with a query filter. pass def test_group_by_with_period(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(4, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) 
self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def test_group_by_metadata_with_period(self): # This test checks grouping by metadata fields in combination # with period grouping. f = storage.SampleFilter( meter='instance') results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['resource_metadata.instance_type'])) self.assertEqual(5, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) 
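# NOTE: in these assertions `period` is the fixed window length requested by
# the caller, while `duration` is the span actually covered by samples inside
# that window: for instance, samples at 14:59 and 16:10 within one 7200 s
# window yield a duration of 71 minutes, i.e. 4260 seconds.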
self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '84'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) else: self.assertNotEqual([{'resource_metadata.instance_type': '82'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '83'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) def test_group_by_with_query_filter_and_period(self): f = storage.SampleFilter( meter='instance', source='source-1', ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), 
r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def test_group_by_metadata_with_query_filter_and_period(self): # This test checks grouping with metadata fields in combination # with a query filter and period grouping. f = storage.SampleFilter( meter='instance', project='project-1', ) results = list( self.conn.get_meter_statistics( f, period=7200, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '84'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) else: self.assertNotEqual([{'resource_metadata.instance_type': '82'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '83'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def 
test_group_by_start_timestamp_after(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual([], results) def test_group_by_end_timestamp_before(self): f = storage.SampleFilter( meter='instance', end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual([], results) def test_group_by_start_timestamp(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_end_timestamp(self): f = storage.SampleFilter( meter='instance', end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(1, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) def test_group_by_start_end_timestamp(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(5, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(10, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_start_end_timestamp_with_query_filter(self): f = storage.SampleFilter( meter='instance', project='project-1', 
start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), ) results = list(self.conn.get_meter_statistics(f, groupby=['resource_id'])) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_start_end_timestamp_with_period(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), ) results = list(self.conn.get_meter_statistics(f, period=3600, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), datetime.datetime(2013, 8, 1, 15, 0), datetime.datetime(2013, 8, 1, 16, 0)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 15, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), 
r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 15, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 14, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 16, 0)], [r.groupby, r.period_start]) def test_group_by_start_end_timestamp_with_query_filter_and_period(self): f = storage.SampleFilter( meter='instance', source='source-1', start_timestamp=datetime.datetime(2013, 8, 1, 10, 0), end_timestamp=datetime.datetime(2013, 8, 1, 18, 0), ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0), datetime.datetime(2013, 8, 1, 14, 0), datetime.datetime(2013, 8, 1, 16, 0)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 0)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 14, 0)], [r.groupby, r.period_start]) class CounterDataTypeTest(DBTestBase): def prepare_data(self): c = sample.Sample( 'dummyBigCounter', sample.TYPE_CUMULATIVE, unit='', volume=337203685477580, user_id='user-id', 
project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) c = sample.Sample( 'dummySmallCounter', sample.TYPE_CUMULATIVE, unit='', volume=-337203685477580, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) c = sample.Sample( 'floatCounter', sample.TYPE_CUMULATIVE, unit='', volume=1938495037.53697, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_storage_can_handle_large_values(self): f = storage.SampleFilter( meter='dummyBigCounter', ) results = list(self.conn.get_samples(f)) self.assertEqual(337203685477580, results[0].counter_volume) f = storage.SampleFilter( meter='dummySmallCounter', ) results = list(self.conn.get_samples(f)) observed_num = int(results[0].counter_volume) self.assertEqual(-337203685477580, observed_num) def test_storage_can_handle_float_values(self): f = storage.SampleFilter( meter='floatCounter', ) results = list(self.conn.get_samples(f)) self.assertEqual(1938495037.53697, results[0].counter_volume) class EventTestBase(tests_db.TestBase): """Separate test base class. We don't want to inherit all the Meter stuff. """ def setUp(self): super(EventTestBase, self).setUp() self.prepare_data() def prepare_data(self): self.event_models = [] base = 0 self.start = datetime.datetime(2013, 12, 31, 5, 0) now = self.start for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']: trait_models = [event_models.Trait(name, dtype, value) for name, dtype, value in [ ('trait_A', event_models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', event_models.Trait.INT_TYPE, base + 1), ('trait_C', event_models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', event_models.Trait.DATETIME_TYPE, now)]] self.event_models.append( event_models.Event("id_%s_%d" % (event_type, base), event_type, now, trait_models, {'status': {'nested': 'started'}})) base += 100 now = now + datetime.timedelta(hours=1) self.end = now self.event_conn.record_events(self.event_models) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTTLTest(EventTestBase): @mock.patch.object(timeutils, 'utcnow') def test_clear_expired_event_data(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0) self.event_conn.clear_expired_event_data(3600) events = list(self.event_conn.get_events(storage.EventFilter())) self.assertEqual(2, len(events)) event_types = list(self.event_conn.get_event_types()) self.assertEqual(['Bar', 'Zoo'], event_types) for event_type in event_types: trait_types = list(self.event_conn.get_trait_types(event_type)) self.assertEqual(4, len(trait_types)) traits = list(self.event_conn.get_traits(event_type)) self.assertEqual(4, len(traits)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'db2') class EventTest(EventTestBase): def test_duplicate_message_id(self): now = datetime.datetime.utcnow() m = [event_models.Event("1", "Foo", now, None, {}), event_models.Event("1", "Zoo", now, [], {})] with 
mock.patch('%s.LOG' % self.event_conn.record_events.__module__) as log: self.event_conn.record_events(m) self.assertEqual(1, log.info.call_count) def test_bad_event(self): now = datetime.datetime.utcnow() broken_event = event_models.Event("1", "Foo", now, None, {}) del broken_event.__dict__['raw'] m = [broken_event, broken_event] with mock.patch('%s.LOG' % self.event_conn.record_events.__module__) as log: self.assertRaises(AttributeError, self.event_conn.record_events, m) # ensure that record_events does not break on first error but # delays exception and tries to record each event. self.assertEqual(2, log.exception.call_count) class GetEventTest(EventTestBase): def test_generated_is_datetime(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) for i, event in enumerate(events): self.assertIsInstance(event.generated, datetime.datetime) self.assertEqual(event.generated, self.event_models[i].generated) model_traits = self.event_models[i].traits for j, trait in enumerate(event.traits): if trait.dtype == event_models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) self.assertEqual(trait.value, model_traits[j].value) def test_simple_get(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) start_time = None for i, ev_type in enumerate(['Foo', 'Bar', 'Zoo']): self.assertEqual(ev_type, events[i].event_type) self.assertEqual(4, len(events[i].traits)) # Ensure sorted results ... if start_time is not None: self.assertLess(start_time, events[i].generated) start_time = events[i].generated def test_simple_get_event_type(self): expected_trait_values = { 'id_Bar_100': { 'trait_A': 'my_Bar_text', 'trait_B': 101, 'trait_C': 100.123456, 'trait_D': self.start + datetime.timedelta(hours=1) }, 'id_Bar_400': { 'trait_A': 'my_Bar_text', 'trait_B': 401, 'trait_C': 400.123456, 'trait_D': self.start + datetime.timedelta(hours=4) } } event_filter = storage.EventFilter(self.start, self.end, "Bar") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual("Bar", events[1].event_type) self.assertEqual(4, len(events[0].traits)) self.assertEqual(4, len(events[1].traits)) for event in events: trait_values = expected_trait_values.get(event.message_id, None) if not trait_values: self.fail("Unexpected event ID returned: %s" % event.message_id) for trait in event.traits: expected_val = trait_values.get(trait.name) if not expected_val: self.fail("Unexpected trait name: %s" % trait.name) self.assertEqual(expected_val, trait.value) def test_get_event_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 101}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def test_get_event_trait_filter_op_string(self): trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text', 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Foo",
events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_A', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[3].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_integer(self): trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_B', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[4].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_float(self): trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, 
traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_C', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) def test_get_event_trait_filter_op_datetime(self): trait_filters = [{'key': 'trait_D', 'datetime': self.start + datetime.timedelta(hours=2), 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Zoo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_D', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar",
events[2].event_type) def test_get_event_multiple_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_A', 'string': 'my_Foo_text'}, {'key': 'trait_C', 'float': 0.123456}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def test_get_event_multiple_trait_filter_expect_none(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_A', 'string': 'my_Zoo_text'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(0, len(events)) def test_get_event_types(self): event_types = [e for e in self.event_conn.get_event_types()] self.assertEqual(3, len(event_types)) self.assertIn("Bar", event_types) self.assertIn("Foo", event_types) self.assertIn("Zoo", event_types) def test_get_trait_types(self): trait_types = [tt for tt in self.event_conn.get_trait_types("Foo")] self.assertEqual(4, len(trait_types)) # use a list, not a lazy map, so the repeated membership checks below # behave the same under Python 2 and Python 3 trait_type_names = [tt['name'] for tt in trait_types] self.assertIn("trait_A", trait_type_names) self.assertIn("trait_B", trait_type_names) self.assertIn("trait_C", trait_type_names) self.assertIn("trait_D", trait_type_names) def test_get_trait_types_unknown_event(self): trait_types = [tt for tt in self.event_conn.get_trait_types("Moo")] self.assertEqual(0, len(trait_types)) def test_get_traits(self): traits = self.event_conn.get_traits("Bar") # format results in a way that makes them easier to work with trait_dict = {} for trait in traits: trait_dict[trait.name] = trait.dtype self.assertIn("trait_A", trait_dict) self.assertEqual(event_models.Trait.TEXT_TYPE, trait_dict["trait_A"]) self.assertIn("trait_B", trait_dict) self.assertEqual(event_models.Trait.INT_TYPE, trait_dict["trait_B"]) self.assertIn("trait_C", trait_dict) self.assertEqual(event_models.Trait.FLOAT_TYPE, trait_dict["trait_C"]) self.assertIn("trait_D", trait_dict) self.assertEqual(event_models.Trait.DATETIME_TYPE, trait_dict["trait_D"]) def test_get_all_traits(self): traits = self.event_conn.get_traits("Foo") traits = sorted(traits, key=operator.attrgetter('dtype')) self.assertEqual(8, len(traits)) trait = traits[0] self.assertEqual("trait_A", trait.name) self.assertEqual(event_models.Trait.TEXT_TYPE, trait.dtype) def test_simple_get_event_no_traits(self): new_events = [event_models.Event("id_notraits", "NoTraits", self.start, [], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(self.start, self.end, "NoTraits") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("id_notraits", events[0].message_id) self.assertEqual("NoTraits", events[0].event_type) self.assertEqual(0, len(events[0].traits)) def test_simple_get_no_filters(self): event_filter = storage.EventFilter(None, None, None) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) def test_get_by_message_id(self): new_events = [event_models.Event("id_testid", "MessageIDTest", self.start, [], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(message_id="id_testid") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) event = events[0]
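# NOTE: a minimal sketch (not the storage API itself) of how a single typed
# traits_filter entry such as {'key': 'trait_B', 'integer': 101, 'op': 'le'}
# can be applied to one trait value; the operand lives under its type name
# ('string', 'integer', 'float' or 'datetime') and 'op' defaults to equality.
import operator

_TRAIT_OPS = {'eq': operator.eq, 'ne': operator.ne, 'lt': operator.lt,
              'le': operator.le, 'gt': operator.gt, 'ge': operator.ge}


def trait_matches(trait_value, trait_filter):
    """Compare a trait value using the filter's typed operand and 'op'."""
    operand = [v for k, v in trait_filter.items()
               if k in ('string', 'integer', 'float', 'datetime')][0]
    return _TRAIT_OPS[trait_filter.get('op', 'eq')](trait_value, operand)

assert trait_matches(101, {'key': 'trait_B', 'integer': 101, 'op': 'le'})
assert not trait_matches(102, {'key': 'trait_B', 'integer': 101, 'op': 'le'})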
self.assertEqual("id_testid", event.message_id) def test_simple_get_raw(self): event_filter = storage.EventFilter() events = [event for event in self.event_conn.get_events(event_filter)] self.assertTrue(events) self.assertEqual({'status': {'nested': 'started'}}, events[0].raw) def test_trait_type_enforced_on_none(self): new_events = [event_models.Event( "id_testid", "MessageIDTest", self.start, [event_models.Trait('text', event_models.Trait.TEXT_TYPE, ''), event_models.Trait('int', event_models.Trait.INT_TYPE, 0), event_models.Trait('float', event_models.Trait.FLOAT_TYPE, 0.0)], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(message_id="id_testid") events = [event for event in self.event_conn.get_events(event_filter)] options = [(event_models.Trait.TEXT_TYPE, ''), (event_models.Trait.INT_TYPE, 0.0), (event_models.Trait.FLOAT_TYPE, 0.0)] for trait in events[0].traits: options.remove((trait.dtype, trait.value)) class BigIntegerTest(tests_db.TestBase): def test_metadata_bigint(self): metadata = {'bigint': 99999999999999} s = sample.Sample(name='name', type=sample.TYPE_GAUGE, unit='B', volume=1, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime.utcnow(), resource_metadata=metadata) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret) self.conn.record_metering_data(msg) @tests_db.run_with('mongodb') class MongoAutoReconnectTest(DBTestBase): def setUp(self): super(MongoAutoReconnectTest, self).setUp() self.CONF.set_override('retry_interval', 0, group='database') def test_mongo_client(self): self.assertIsInstance(self.conn.conn.conn, pymongo.MongoClient) def test_mongo_cursor_next(self): expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) raise_exc = [False, True] method = self.conn.db.resource.find().cursor.next with mock.patch('pymongo.cursor.Cursor.next', mock.Mock()) as mock_next: mock_next.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) resource = self.conn.db.resource.find().next() self.assertEqual(expected_first_sample_timestamp, resource['first_sample_timestamp']) def test_mongo_insert(self): raise_exc = [False, True] method = self.conn.db.meter.insert with mock.patch('pymongo.collection.Collection.insert', mock.Mock(return_value=method)) as mock_insert: mock_insert.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) mock_insert.__name__ = 'insert' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 14, 39), source='test-proxy') meters = list(self.conn.db.meter.find()) self.assertEqual(12, len(meters)) def test_mongo_find_and_modify(self): raise_exc = [False, True] method = self.conn.db.resource.find_and_modify with mock.patch('pymongo.collection.Collection.find_and_modify', mock.Mock()) as mock_fam: mock_fam.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) mock_fam.__name__ = 'find_and_modify' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 14, 39), source='test-proxy') data = self.conn.db.resource.find( {'last_sample_timestamp': datetime.datetime(2014, 10, 15, 14, 39)})[0]['source'] self.assertEqual('test-proxy', data) def test_mongo_update(self): raise_exc = [False, True] method = self.conn.db.resource.update with mock.patch('pymongo.collection.Collection.update', mock.Mock()) as mock_update: mock_update.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) 
mock_update.__name__ = 'update' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 17, 39), source='test-proxy-update') data = self.conn.db.resource.find( {'last_sample_timestamp': datetime.datetime(2014, 10, 15, 17, 39)})[0]['source'] self.assertEqual('test-proxy-update', data) @tests_db.run_with('mongodb') class MongoTimeToLiveTest(DBTestBase): def test_ensure_index(self): cfg.CONF.set_override('metering_time_to_live', 5, group='database') self.conn.upgrade() self.assertEqual(5, self.conn.db.resource.index_information() ['resource_ttl']['expireAfterSeconds']) self.assertEqual(5, self.conn.db.meter.index_information() ['meter_ttl']['expireAfterSeconds']) def test_modification_of_index(self): cfg.CONF.set_override('metering_time_to_live', 5, group='database') self.conn.upgrade() cfg.CONF.set_override('metering_time_to_live', 15, group='database') self.conn.upgrade() self.assertEqual(15, self.conn.db.resource.index_information() ['resource_ttl']['expireAfterSeconds']) self.assertEqual(15, self.conn.db.meter.index_information() ['meter_ttl']['expireAfterSeconds']) class TestRecordUnicodeSamples(DBTestBase): def prepare_data(self): self.msgs = [] self.msgs.append(self.create_and_store_sample( name=u'meter.accent\xe9\u0437', metadata={u"metadata_key\xe9\u0437": "test", u"metadata_key": u"test\xe9\u0437"}, )) def test_unicode_sample(self): f = storage.SampleFilter() results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) expected = self.msgs[0] actual = results[0].as_dict() self.assertEqual(expected['counter_name'], actual['counter_name']) self.assertEqual(expected['resource_metadata'], actual['resource_metadata']) ceilometer-6.1.5/ceilometer/tests/functional/storage/test_impl_hbase.py0000664000567000056710000000774013072744706027652 0ustar jenkinsjenkins00000000000000# # Copyright 2012, 2013 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_hbase.py .. note:: In order to run the tests against real HBase server set the environment variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before running the tests. Make sure the Thrift server is running on that server. 
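   For example, assuming a hypothetical host running the HBase Thrift
   server on its default port:

       CEILOMETER_TEST_HBASE_URL=hbase://hbase-host:9090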
""" import mock try: import happybase # noqa except ImportError: import testtools.testcase raise testtools.testcase.TestSkipped("happybase is needed") from ceilometer.event.storage import impl_hbase as hbase_event from ceilometer.storage import impl_hbase as hbase from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db class ConnectionTest(tests_db.TestBase): @tests_db.run_with('hbase') def test_hbase_connection(self): class TestConn(object): def __init__(self, host, port): self.netloc = '%s:%s' % (host, port) def open(self): pass def get_connection_pool(conf): return TestConn(conf['host'], conf['port']) with mock.patch.object(hbase.Connection, '_get_connection_pool', side_effect=get_connection_pool): conn = hbase.Connection('hbase://test_hbase:9090') self.assertIsInstance(conn.conn_pool, TestConn) class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'statistics': {'groupby': False, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } actual_capabilities = hbase.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = hbase_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = hbase.Connection.get_storage_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) ceilometer-6.1.5/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py0000664000567000056710000002140213072744706030721 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_sqlalchemy.py .. note:: In order to run the tests against real SQL server set the environment variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running the tests. 
""" import datetime import warnings import mock from oslo_db import exception from oslo_utils import timeutils from six.moves import reprlib from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event from ceilometer.event.storage import models from ceilometer.publisher import utils from ceilometer import sample from ceilometer.storage import impl_sqlalchemy from ceilometer.storage.sqlalchemy import models as sql_models from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db from ceilometer.tests.functional.storage \ import test_storage_scenarios as scenarios @tests_db.run_with('sqlite', 'mysql', 'pgsql') class CeilometerBaseTest(tests_db.TestBase): def test_ceilometer_base(self): base = sql_models.CeilometerBase() base['key'] = 'value' self.assertEqual('value', base['key']) @tests_db.run_with('sqlite') class EngineFacadeTest(tests_db.TestBase): @mock.patch.object(warnings, 'warn') def test_no_not_supported_warning(self, mocked): impl_sqlalchemy.Connection('sqlite://') impl_sqla_event.Connection('sqlite://') self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning), mocked.call_args_list) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTypeTest(tests_db.TestBase): # EventType is a construct specific to sqlalchemy # Not applicable to other drivers. def test_event_type_exists(self): et1 = self.event_conn._get_or_create_event_type("foo") self.assertTrue(et1.id >= 0) et2 = self.event_conn._get_or_create_event_type("foo") self.assertEqual(et2.id, et1.id) self.assertEqual(et2.desc, et1.desc) def test_event_type_unique(self): et1 = self.event_conn._get_or_create_event_type("foo") self.assertTrue(et1.id >= 0) et2 = self.event_conn._get_or_create_event_type("blah") self.assertNotEqual(et1.id, et2.id) self.assertNotEqual(et1.desc, et2.desc) # Test the method __repr__ returns a string self.assertTrue(reprlib.repr(et2)) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTest(tests_db.TestBase): def _verify_data(self, trait, trait_table): now = datetime.datetime.utcnow() ev = models.Event('1', 'name', now, [trait], {}) self.event_conn.record_events([ev]) session = self.event_conn._engine_facade.get_session() t_tables = [sql_models.TraitText, sql_models.TraitFloat, sql_models.TraitInt, sql_models.TraitDatetime] for table in t_tables: if table == trait_table: self.assertEqual(1, session.query(table).count()) else: self.assertEqual(0, session.query(table).count()) def test_string_traits(self): model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text") self._verify_data(model, sql_models.TraitText) def test_int_traits(self): model = models.Trait("Foo", models.Trait.INT_TYPE, 100) self._verify_data(model, sql_models.TraitInt) def test_float_traits(self): model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456) self._verify_data(model, sql_models.TraitFloat) def test_datetime_traits(self): now = datetime.datetime.utcnow() model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now) self._verify_data(model, sql_models.TraitDatetime) def test_event_repr(self): ev = sql_models.Event('msg_id', None, False, {}) ev.id = 100 self.assertTrue(reprlib.repr(ev)) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class RelationshipTest(scenarios.DBTestBase): # Note: Do not derive from SQLAlchemyEngineTestBase, since we # don't want to automatically inherit all the Meter setup. 
@mock.patch.object(timeutils, 'utcnow') def test_clear_metering_data_meta_tables(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) self.conn.clear_expired_metering_data(3 * 60) session = self.conn._engine_facade.get_session() self.assertEqual(5, session.query(sql_models.Sample).count()) resource_ids = (session.query(sql_models.Resource.internal_id) .group_by(sql_models.Resource.internal_id)) meta_tables = [sql_models.MetaText, sql_models.MetaFloat, sql_models.MetaBigInt, sql_models.MetaBool] s = set() for table in meta_tables: self.assertEqual(0, (session.query(table) .filter(~table.id.in_(resource_ids)).count() )) s.update(session.query(table.id).all()) self.assertEqual(set(resource_ids.all()), s) class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True}} }, } actual_capabilities = impl_sqlalchemy.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_sqla_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = (impl_sqlalchemy. Connection.get_storage_capabilities()) self.assertEqual(expected_capabilities, actual_capabilities) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class FilterQueryTestForMeters(scenarios.DBTestBase): def prepare_data(self): self.counters = [] c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5, user_id=None, project_id=None, resource_id='fake_id', timestamp=datetime.datetime(2012, 9, 25, 10, 30), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) self.counters.append(c) msg = utils.meter_message_from_counter( c, secret='not-so-secret') self.conn.record_metering_data(msg) def test_get_meters_by_user(self): meters = list(self.conn.get_meters(user='None')) self.assertEqual(1, len(meters)) def test_get_meters_by_project(self): meters = list(self.conn.get_meters(project='None')) self.assertEqual(1, len(meters)) ceilometer-6.1.5/ceilometer/tests/functional/storage/test_impl_log.py0000664000567000056710000000204613072744706027343 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for ceilometer/storage/impl_log.py """ from oslotest import base from ceilometer.storage import impl_log class ConnectionTest(base.BaseTestCase): @staticmethod def test_get_connection(): conn = impl_log.Connection(None) conn.record_metering_data({'counter_name': 'test', 'resource_id': __name__, 'counter_volume': 1, }) ceilometer-6.1.5/ceilometer/tests/functional/storage/test_pymongo_base.py0000664000567000056710000001310013072744706030214 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests the mongodb and db2 common functionality """ import copy import datetime import mock from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests import db as tests_db from ceilometer.tests.functional.storage import test_storage_scenarios @tests_db.run_with('mongodb', 'db2') class CompatibilityTest(test_storage_scenarios.DBTestBase): def prepare_data(self): def old_record_metering_data(self, data): received_timestamp = datetime.datetime.utcnow() self.db.resource.update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], # Current metadata being used and when it was # last updated. 'timestamp': data['timestamp'], 'received_timestamp': received_timestamp, 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], }, }, }, upsert=True, ) record = copy.copy(data) self.db.meter.insert(record) # Stubout with the old version DB schema, the one w/o 'counter_unit' with mock.patch.object(self.conn, 'record_metering_data', side_effect=old_record_metering_data): self.counters = [] c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10, 30), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) self.counters.append(c) msg = utils.meter_message_from_counter( c, secret='not-so-secret') self.conn.record_metering_data(self.conn, msg) def test_counter_unit(self): meters = list(self.conn.get_meters()) self.assertEqual(1, len(meters)) # TODO(ananya) same test should be done for other databse @tests_db.run_with('mongodb', 'db2') class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase): def prepare_data(self): def old_record_metering_data(self, data): received_timestamp = datetime.datetime.utcnow() self.db.resource.update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], # Current metadata being used and when it was # last updated. 
'timestamp': data['timestamp'], 'received_timestamp': received_timestamp, 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], }, }, }, upsert=True, ) record = copy.copy(data) self.db.meter.insert(record) # Stubout with the old version DB schema, the one w/o 'counter_unit' with mock.patch.object(self.conn, 'record_metering_data', side_effect=old_record_metering_data): self.counters = [] c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5, None, None, None, timestamp=datetime.datetime(2012, 9, 25, 10, 30), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) self.counters.append(c) msg = utils.meter_message_from_counter( c, secret='not-so-secret') self.conn.record_metering_data(self.conn, msg) def test_get_meters_by_user(self): meters = list(self.conn.get_meters(user='None')) self.assertEqual(1, len(meters)) def test_get_meters_by_resource(self): meters = list(self.conn.get_meters(resource='None')) self.assertEqual(1, len(meters)) def test_get_meters_by_project(self): meters = list(self.conn.get_meters(project='None')) self.assertEqual(1, len(meters)) ceilometer-6.1.5/ceilometer/tests/functional/storage/__init__.py0000664000567000056710000000000013072744703026222 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/functional/storage/test_impl_mongodb.py0000664000567000056710000001262113072744706030207 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_mongodb.py .. note:: In order to run the tests against another MongoDB server set the environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB server before running the tests. 
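   For example, pointing at a hypothetical server listening on MongoDB's
   default port:

       CEILOMETER_TEST_MONGODB_URL=mongodb://localhost:27017/ceilometer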
""" from ceilometer.event.storage import impl_mongodb as impl_mongodb_event from ceilometer.storage import impl_mongodb from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db @tests_db.run_with('mongodb') class MongoDBConnection(tests_db.TestBase): def test_connection_pooling(self): test_conn = impl_mongodb.Connection(self.db_manager.url) self.assertEqual(self.conn.conn, test_conn.conn) def test_replica_set(self): url = self.db_manager._url + '?replicaSet=foobar' conn = impl_mongodb.Connection(url) self.assertTrue(conn.conn) @tests_db.run_with('mongodb') class IndexTest(tests_db.TestBase): def _test_ttl_index_absent(self, conn, coll_name, ttl_opt): # create a fake index and check it is deleted coll = getattr(conn.db, coll_name) index_name = '%s_ttl' % coll_name self.CONF.set_override(ttl_opt, -1, group='database') conn.upgrade() self.assertNotIn(index_name, coll.index_information()) self.CONF.set_override(ttl_opt, 456789, group='database') conn.upgrade() self.assertEqual(456789, coll.index_information() [index_name]['expireAfterSeconds']) def test_meter_ttl_index_absent(self): self._test_ttl_index_absent(self.conn, 'meter', 'metering_time_to_live') def test_event_ttl_index_absent(self): self._test_ttl_index_absent(self.event_conn, 'event', 'event_time_to_live') def _test_ttl_index_present(self, conn, coll_name, ttl_opt): coll = getattr(conn.db, coll_name) self.CONF.set_override(ttl_opt, 456789, group='database') conn.upgrade() index_name = '%s_ttl' % coll_name self.assertEqual(456789, coll.index_information() [index_name]['expireAfterSeconds']) self.CONF.set_override(ttl_opt, -1, group='database') conn.upgrade() self.assertNotIn(index_name, coll.index_information()) def test_meter_ttl_index_present(self): self._test_ttl_index_present(self.conn, 'meter', 'metering_time_to_live') def test_event_ttl_index_present(self): self._test_ttl_index_present(self.event_conn, 'event', 'event_time_to_live') class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True}} }, } actual_capabilities = impl_mongodb.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_mongodb_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = (impl_mongodb.Connection. 
get_storage_capabilities()) self.assertEqual(expected_capabilities, actual_capabilities) ceilometer-6.1.5/ceilometer/tests/functional/__init__.py0000664000567000056710000000000013072744703024556 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/base.py0000664000567000056710000000706713072744706021616 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test base classes. """ import functools import os.path import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import six from testtools import testcase import webtest import ceilometer from ceilometer import messaging class BaseTestCase(base.BaseTestCase): def setup_messaging(self, conf, exchange=None): self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) conf.set_override("notification_driver", "messaging") if not exchange: exchange = 'ceilometer' conf.set_override("control_exchange", exchange) # NOTE(sileht): Ensure a new oslo.messaging driver is loaded # between each tests self.transport = messaging.get_transport("fake://", cache=False) self.useFixture(mockpatch.Patch( 'ceilometer.messaging.get_transport', return_value=self.transport)) def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. This relies on assertAlmostEqual to avoid rounding problem, and only checks up the first microsecond values. 
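        For example (illustrative values), with places=5 a difference
        smaller than five microseconds is accepted:

            self.assertTimestampEqual(
                datetime.datetime(2015, 1, 1, 0, 0, 0, 0),
                datetime.datetime(2015, 1, 1, 0, 0, 0, 4))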
""" return self.assertAlmostEqual( timeutils.delta_seconds(first, second), 0.0, places=5) def assertIsEmpty(self, obj): try: if len(obj) != 0: self.fail("%s is not empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertIsNotEmpty(self, obj): try: if len(obj) == 0: self.fail("%s is empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except ceilometer.NotImplementedError as e: raise testcase.TestSkipped(six.text_type(e)) except webtest.app.AppError as e: if 'not implemented' in six.text_type(e): raise testcase.TestSkipped(six.text_type(e)) raise return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) ceilometer-6.1.5/ceilometer/tests/db.py0000664000567000056710000002116313072744706021262 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base classes for API tests.""" import os import uuid import warnings import fixtures import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch import six from six.moves.urllib import parse as urlparse import sqlalchemy from testtools import testcase from ceilometer import storage from ceilometer.tests import base as test_base try: from ceilometer.tests import mocks except ImportError: mocks = None # happybase module is not Python 3 compatible yet class MongoDbManager(fixtures.Fixture): def __init__(self, url): self._url = url def setUp(self): super(MongoDbManager, self).setUp() with warnings.catch_warnings(): warnings.filterwarnings( action='ignore', message='.*you must provide a username and password.*') try: self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') except storage.StorageBadVersion as e: raise testcase.TestSkipped(six.text_type(e)) @property def url(self): return '%(url)s_%(db)s' % { 'url': self._url, 'db': uuid.uuid4().hex } class SQLManager(fixtures.Fixture): def __init__(self, url): db_name = 'ceilometer_%s' % uuid.uuid4().hex engine = sqlalchemy.create_engine(url) conn = engine.connect() self._create_database(conn, db_name) conn.close() engine.dispose() parsed = list(urlparse.urlparse(url)) parsed[2] = '/' + db_name self.url = urlparse.urlunparse(parsed) def setUp(self): super(SQLManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') class PgSQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.connection.set_isolation_level(0) conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name) conn.connection.set_isolation_level(1) class MySQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.execute('CREATE DATABASE %s;' % db_name) class ElasticSearchManager(fixtures.Fixture): def __init__(self, url): self.url = url def setUp(self): super(ElasticSearchManager, self).setUp() self.connection = storage.get_connection( 'sqlite://', 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') # prefix each test with unique index name self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex # force index on write so data is queryable right away self.event_connection._refresh_on_write = True class HBaseManager(fixtures.Fixture): def __init__(self, url): self._url = url def setUp(self): super(HBaseManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table data_prefix = str(uuid.uuid4().hex) def table(conn, name): return mocks.MockHBaseTable(name, conn, data_prefix) # Mock only real HBase connection, MConnection "table" method # stays origin. mock.patch('happybase.Connection.table', new=table).start() # We shouldn't delete data and tables after each test, # because it last for too long. 
# All tests tables will be deleted in setup-test-env.sh mock.patch("happybase.Connection.disable_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.delete_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.create_table", new=mock.MagicMock()).start() @property def url(self): return '%s?table_prefix=%s&table_prefix_separator=%s' % ( self._url, os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"), os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_") ) class SQLiteManager(fixtures.Fixture): def __init__(self, url): self.url = url def setUp(self): super(SQLiteManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') @six.add_metaclass(test_base.SkipNotImplementedMeta) class TestBase(test_base.BaseTestCase): DRIVER_MANAGERS = { 'mongodb': MongoDbManager, 'mysql': MySQLManager, 'postgresql': PgSQLManager, 'db2': MongoDbManager, 'sqlite': SQLiteManager, 'es': ElasticSearchManager, } if mocks is not None: DRIVER_MANAGERS['hbase'] = HBaseManager def setUp(self): super(TestBase, self).setUp() db_url = os.environ.get('OVERTEST_URL', "sqlite://").replace( "mysql://", "mysql+pymysql://") engine = urlparse.urlparse(db_url).scheme # in case some drivers have additional specification, for example: # PyMySQL will have scheme mysql+pymysql engine = engine.split('+')[0] # NOTE(Alexei_987) Shortcut to skip expensive db setUp test_method = self._get_test_method() if (hasattr(test_method, '_run_with') and engine not in test_method._run_with): raise testcase.TestSkipped( 'Test is not applicable for %s' % engine) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([], project='ceilometer', validate_default_values=True) manager = self.DRIVER_MANAGERS.get(engine) if not manager: self.skipTest("missing driver manager: %s" % engine) self.db_manager = manager(db_url) self.useFixture(self.db_manager) self.conn = self.db_manager.connection self.conn.upgrade() self.event_conn = self.db_manager.event_connection self.event_conn.upgrade() self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection', side_effect=self._get_connection)) # Set a default location for the pipeline config file so the # tests work even if ceilometer is not installed globally on # the system. self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline') self.CONF.set_override( 'pipeline_cfg_file', self.path_get('etc/ceilometer/pipeline.yaml') ) def tearDown(self): self.event_conn.clear() self.event_conn = None self.conn.clear() self.conn = None super(TestBase, self).tearDown() def _get_connection(self, url, namespace): if namespace == "ceilometer.event.storage": return self.event_conn return self.conn def run_with(*drivers): """Used to mark tests that are only applicable for certain db driver. Skips test if driver is not available. 
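    For example, as used elsewhere in this tree, either on a whole test
    class or on a single test method:

        @tests_db.run_with('mongodb', 'db2')
        class CompatibilityTest(test_storage_scenarios.DBTestBase):
            ...

        @tests_db.run_with('hbase')
        def test_hbase_connection(self):
            ...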
""" def decorator(test): if isinstance(test, type) and issubclass(test, TestBase): # Decorate all test methods for attr in dir(test): value = getattr(test, attr) if callable(value) and attr.startswith('test_'): if six.PY3: value._run_with = drivers else: value.__func__._run_with = drivers else: test._run_with = drivers return test return decorator ceilometer-6.1.5/ceilometer/tests/integration/0000775000567000056710000000000013072745164022642 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/integration/hooks/0000775000567000056710000000000013072745164023765 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/integration/hooks/post_test_hook.sh0000775000567000056710000000677313072744706027406 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. function generate_testr_results { if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz fi } # If we're running in the gate find our keystone endpoint to give to # gabbi tests and do a chown. Otherwise the existing environment # should provide URL and TOKEN. if [ -d $BASE/new/devstack ]; then export CEILOMETER_DIR="$BASE/new/ceilometer" STACK_USER=stack sudo chown -R $STACK_USER:stack $CEILOMETER_DIR source $BASE/new/devstack/openrc admin admin # Go to the ceilometer dir cd $CEILOMETER_DIR fi openstack catalog list export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/public/{print $2}') export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/public/{print $2}') export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/public/{print $2}') export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.* /{print $4; exit}') export ADMIN_TOKEN=$(openstack token issue -c id -f value) # Run tests echo "Running telemetry integration test suite" set +e sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration EXIT_CODE=$? 
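# Failures are tolerated up to this point (set +e above) so that the
# diagnostics below are still collected; the captured EXIT_CODE is
# re-raised at the very end of this script with "exit $EXIT_CODE".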
echo "* Message queue status:" sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info sudo rabbitmqctl list_consumers if [ $EXIT_CODE -ne 0 ] ; then set +x echo "* Heat stack:" heat stack-show integration_test echo "* Alarm list:" ceilometer alarm-list echo "* Nova instance list:" openstack server list echo "* Gnocchi instance list:" gnocchi resource list -t instance for instance_id in $(openstack server list -f value -c ID); do echo "* Nova instance detail:" openstack server show $instance_id echo "* Gnocchi instance detail:" gnocchi resource show -t instance $instance_id echo "* Gnocchi measures for instance ${instance_id}:" gnocchi measures show -r $instance_id cpu_util done gnocchi status # Be sure to source Gnocchi settings before source $BASE/new/gnocchi/devstack/settings echo "* Unprocessed measures:" sudo find $GNOCCHI_DATA_DIR set -x fi set -e # Collect and parse result if [ -n "$CEILOMETER_DIR" ]; then generate_testr_results fi exit $EXIT_CODE ceilometer-6.1.5/ceilometer/tests/integration/gabbi/0000775000567000056710000000000013072745164023706 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/integration/gabbi/test_gabbi_live.py0000664000567000056710000000254313072744706027407 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""A test module to exercise the Gnocchi API with gabbi.""" import os from gabbi import driver TESTS_DIR = 'gabbits-live' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL", "HEAT_SERVICE_URL", "NOVA_SERVICE_URL", "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"] for env_variable in NEEDED_ENV: if not os.getenv(env_variable): if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): raise RuntimeError('%s is not set' % env_variable) else: return test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host="localhost", port=8041) ceilometer-6.1.5/ceilometer/tests/integration/gabbi/gabbits-live/0000775000567000056710000000000013072745164026256 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml0000664000567000056710000001173713072744706031465 0ustar jenkinsjenkins00000000000000defaults: request_headers: x-auth-token: $ENVIRON['ADMIN_TOKEN'] tests: - name: list alarms none desc: Lists alarms, none yet exist url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET response_strings: - "[]" - name: list servers none desc: List servers, none yet exists url: $ENVIRON['NOVA_SERVICE_URL']/servers method: GET response_strings: - "[]" - name: create stack desc: Create an autoscaling stack url: $ENVIRON['HEAT_SERVICE_URL']/stacks method: POST request_headers: content-type: application/json data: <@create_stack.json status: 201 - name: waiting for stack creation desc: Wait for the second event on the stack resource, it can be a success or failure url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test redirects: true method: GET status: 200 poll: count: 300 delay: 1 response_json_paths: $.events[1].resource_name: integration_test - name: control stack status desc: Checks the stack have been created successfully url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET status: 200 poll: count: 5 delay: 1 response_json_paths: $.stack.stack_status: "CREATE_COMPLETE" - name: list servers desc: Wait the autoscaling stack grow to two servers url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail method: GET poll: count: 600 delay: 1 response_json_paths: $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[0].status: ACTIVE $.servers[1].status: ACTIVE $.servers.`len`: 2 - name: check gnocchi resources desc: Check the gnocchi resources for this two servers exists url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance method: GET poll: count: 30 delay: 1 response_strings: - '"id": "$RESPONSE["$.servers[0].id"]"' - '"id": "$RESPONSE["$.servers[1].id"]"' - name: check alarm desc: Check the aodh alarm and its state url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET poll: count: 30 delay: 1 response_strings: - "integration_test-cpu_alarm_high-" response_json_paths: $[0].state: alarm - name: get stack location for update desc: Get the stack location url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test method: GET status: 302 - name: update stack desc: Update an autoscaling stack url: $LOCATION method: PUT request_headers: content-type: application/json data: <@update_stack.json status: 202 - name: waiting for stack update desc: Wait for the third event on the stack resource, it can be a success or failure url: 
$ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test redirects: true method: GET status: 200 poll: count: 300 delay: 1 response_json_paths: $.events[3].resource_name: integration_test - name: control stack status desc: Checks the stack have been created successfully url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET status: 200 poll: count: 5 delay: 1 response_json_paths: $.stack.stack_status: "UPDATE_COMPLETE" - name: list servers desc: Wait the autoscaling stack shrink to one server url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail method: GET poll: count: 600 delay: 1 response_json_paths: $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[0].status: ACTIVE $.servers.`len`: 1 - name: get stack location desc: Get the stack location url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test method: GET status: 302 - name: delete stack desc: Delete the stack url: $LOCATION method: DELETE status: 204 - name: get deleted stack desc: Check the stack have been deleted url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET poll: count: 240 delay: 1 status: 404 - name: list alarms deleted desc: List alarms, no more exist url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET response_strings: - "[]" - name: list servers deleted desc: List servers, no more exists url: $ENVIRON['NOVA_SERVICE_URL']/servers method: GET response_strings: - "[]" ceilometer-6.1.5/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json0000664000567000056710000000533013072744706031622 0ustar jenkinsjenkins00000000000000{ "template": { "heat_template_version": "2013-05-23", "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", "resources": { "asg": { "type": "OS::Heat::AutoScalingGroup", "properties": { "min_size": 1, "max_size": 2, "resource": { "type": "OS::Nova::Server", "properties": { "networks": [{ "network": "private" }], "flavor": "m1.tiny", "image": "$ENVIRON['GLANCE_IMAGE_NAME']", "metadata": { "metering.server_group": { "get_param": "OS::stack_id" } }, "user_data_format": "RAW", "user_data": {"Fn::Join": ["", [ "#!/bin/sh\n", "echo 'Loading CPU'\n", "set -v\n", "cat /dev/urandom > /dev/null\n" ]]} } } } }, "web_server_scaledown_policy": { "type": "OS::Heat::ScalingPolicy", "properties": { "adjustment_type": "change_in_capacity", "auto_scaling_group_id": { "get_resource": "asg" }, "cooldown": 2, "scaling_adjustment": -1 } }, "cpu_alarm_high": { "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", "properties": { "description": "Scale-down if the mean CPU > 10% on 1 minute", "metric": "cpu_util", "aggregation_method": "mean", "granularity": 60, "evaluation_periods": 1, "threshold": 10, "comparison_operator": "gt", "alarm_actions": [ { "get_attr": [ "web_server_scaledown_policy", "alarm_url" ] } ], "resource_type": "instance", "query": { "str_replace": { "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", "params": { "stack_id": { "get_param": "OS::stack_id" } } } } } } } } } ceilometer-6.1.5/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json0000664000567000056710000000602113072744703031576 0ustar jenkinsjenkins00000000000000{ "stack_name": "integration_test", "template": { "heat_template_version": "2013-05-23", "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", "resources": { "asg": { "type": "OS::Heat::AutoScalingGroup", "properties": { 
"min_size": 1, "max_size": 2, "resource": { "type": "OS::Nova::Server", "properties": { "networks": [{ "network": "private" }], "flavor": "m1.tiny", "image": "$ENVIRON['GLANCE_IMAGE_NAME']", "metadata": { "metering.server_group": { "get_param": "OS::stack_id" } }, "user_data_format": "RAW", "user_data": {"Fn::Join": ["", [ "#!/bin/sh\n", "echo 'Loading CPU'\n", "set -v\n", "cat /dev/urandom > /dev/null\n" ]]} } } } }, "web_server_scaleup_policy": { "type": "OS::Heat::ScalingPolicy", "properties": { "adjustment_type": "change_in_capacity", "auto_scaling_group_id": { "get_resource": "asg" }, "cooldown": 2, "scaling_adjustment": 1 } }, "cpu_alarm_high": { "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", "properties": { "description": "Scale-up if the mean CPU > 10% on 1 minute", "metric": "cpu_util", "aggregation_method": "mean", "granularity": 60, "evaluation_periods": 1, "threshold": 10, "comparison_operator": "gt", "alarm_actions": [ { "str_replace": { "template": "trust+url", "params": { "url": { "get_attr": [ "web_server_scaleup_policy", "signal_url" ] } } } } ], "resource_type": "instance", "query": { "str_replace": { "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", "params": { "stack_id": { "get_param": "OS::stack_id" } } } } } } } } } ceilometer-6.1.5/ceilometer/tests/integration/gabbi/__init__.py0000664000567000056710000000000013072744703026003 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/integration/__init__.py0000664000567000056710000000000013072744703024737 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/pipeline_base.py0000664000567000056710000025104213072744706023475 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Copyright 2013 Intel Corp. # # Authors: Yunhong Jiang # Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import datetime import traceback import mock from oslo_context import context from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import six from stevedore import extension from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import sample from ceilometer import transformer from ceilometer.transformer import accumulator from ceilometer.transformer import arithmetic from ceilometer.transformer import conversions @six.add_metaclass(abc.ABCMeta) class BasePipelineTestCase(base.BaseTestCase): @staticmethod def fake_tem_init(): """Fake a transformerManager for pipeline. 
The faked entry point setting is below: update: TransformerClass except: TransformerClassException drop: TransformerClassDrop """ pass def fake_tem_get_ext(self, name): class_name_ext = { 'update': self.TransformerClass, 'except': self.TransformerClassException, 'drop': self.TransformerClassDrop, 'cache': accumulator.TransformerAccumulator, 'aggregator': conversions.AggregatorTransformer, 'unit_conversion': conversions.ScalingTransformer, 'rate_of_change': conversions.RateOfChangeTransformer, 'arithmetic': arithmetic.ArithmeticTransformer, 'delta': conversions.DeltaTransformer, } if name in class_name_ext: return extension.Extension(name, None, class_name_ext[name], None, ) raise KeyError(name) def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](url) class PublisherClassException(publisher.PublisherBase): def publish_samples(self, ctxt, samples): raise Exception() def publish_events(self, ctxt, events): raise Exception() class TransformerClass(transformer.TransformerBase): samples = [] grouping_keys = ['counter_name'] def __init__(self, append_name='_update'): self.__class__.samples = [] self.append_name = append_name def flush(self, ctxt): return [] def handle_sample(self, ctxt, counter): self.__class__.samples.append(counter) newname = getattr(counter, 'name') + self.append_name return sample.Sample( name=newname, type=counter.type, volume=counter.volume, unit=counter.unit, user_id=counter.user_id, project_id=counter.project_id, resource_id=counter.resource_id, timestamp=counter.timestamp, resource_metadata=counter.resource_metadata, ) class TransformerClassDrop(transformer.TransformerBase): samples = [] grouping_keys = ['resource_id'] def __init__(self): self.__class__.samples = [] def handle_sample(self, ctxt, counter): self.__class__.samples.append(counter) class TransformerClassException(object): grouping_keys = ['resource_id'] @staticmethod def handle_sample(ctxt, counter): raise Exception() def setUp(self): super(BasePipelineTestCase, self).setUp() self.test_counter = sample.Sample( name='a', type=sample.TYPE_GAUGE, volume=1, unit='B', user_id="test_user", project_id="test_proj", resource_id="test_resource", timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ) self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self.transformer_manager = mock.MagicMock() self.transformer_manager.__getitem__.side_effect = \ self.fake_tem_get_ext self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(mockpatch.Patch( 'ceilometer.pipeline.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, msg): if self._reraise_exception: raise Exception(traceback.format_exc()) @abc.abstractmethod def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" @abc.abstractmethod def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" @abc.abstractmethod def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" @abc.abstractmethod def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" @abc.abstractmethod def _set_pipeline_cfg(self, field, value): """Set a field to a value in the pipeline config.""" @abc.abstractmethod def _extend_pipeline_cfg(self, field, value): """Extend an existing field in the pipeline config 
with a value.""" @abc.abstractmethod def _unset_pipeline_cfg(self, field): """Clear an existing field in the pipeline config.""" def _exception_create_pipelinemanager(self): self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager) def test_no_counters(self): self._unset_pipeline_cfg('counters') self._exception_create_pipelinemanager() def test_no_transformers(self): self._unset_pipeline_cfg('transformers') pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_no_interval(self): self._unset_pipeline_cfg('interval') pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] self.assertEqual(600, pipe.get_interval()) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_invalid_resources(self): invalid_resource = {'invalid': 1} self._set_pipeline_cfg('resources', invalid_resource) self._exception_create_pipelinemanager() def test_check_counters_include_exclude_same(self): counter_cfg = ['a', '!a'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_include_exclude(self): counter_cfg = ['a', '!b'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_wildcard_included(self): counter_cfg = ['a', '*'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_invalid_string_interval(self): self._set_pipeline_cfg('interval', 'string') self._exception_create_pipelinemanager() def test_check_transformer_invalid_transformer(self): transformer_cfg = [ {'name': "test_invalid", 'parameters': {}} ] self._set_pipeline_cfg('transformers', transformer_cfg) self._exception_create_pipelinemanager() def test_get_interval(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] self.assertEqual(5, pipe.get_interval()) def test_publisher_transformer_invoked(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) def test_multiple_included_counters(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with 
pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) @mock.patch('ceilometer.pipeline.LOG') def test_none_volume_counter(self, LOG): self._set_pipeline_cfg('counters', ['empty_volume']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='empty_volume', type=self.test_counter.type, volume=None, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' 'sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) @mock.patch('ceilometer.pipeline.LOG') def test_fake_volume_counter(self, LOG): self._set_pipeline_cfg('counters', ['fake_volume']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='fake_volume', type=self.test_counter.type, volume='fake_value', unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) def test_counter_dont_match(self): counter_cfg = ['nomatch'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(0, publisher.calls) def test_wildcard_counter(self): counter_cfg = ['*'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) def test_wildcard_excluded_counters(self): counter_cfg = ['*', '!a'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) def 
test_wildcard_excluded_counters_not_excluded(self): counter_cfg = ['*', '!b'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) def test_all_excluded_counters_not_excluded(self): counter_cfg = ['!b', '!c'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) def test_all_excluded_counters_is_excluded(self): counter_cfg = ['!a', '!c'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) self.assertTrue(pipeline_manager.pipelines[0].support_meter('b')) self.assertFalse(pipeline_manager.pipelines[0].support_meter('c')) def test_wildcard_and_excluded_wildcard_counters(self): counter_cfg = ['*', '!disk.*'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertFalse(pipeline_manager.pipelines[0]. support_meter('disk.read.bytes')) self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) def test_included_counter_and_wildcard_counters(self): counter_cfg = ['cpu', 'disk.*'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertTrue(pipeline_manager.pipelines[0]. support_meter('disk.read.bytes')) self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) self.assertFalse(pipeline_manager.pipelines[0]. support_meter('instance')) def test_excluded_counter_and_excluded_wildcard_counters(self): counter_cfg = ['!cpu', '!disk.*'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertFalse(pipeline_manager.pipelines[0]. support_meter('disk.read.bytes')) self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu')) self.assertTrue(pipeline_manager.pipelines[0]. 
support_meter('instance')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b_new', getattr(new_publisher.samples[0], "name")) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) self.assertEqual('b', getattr(self.TransformerClass.samples[1], "name")) def test_multiple_pipeline_exception(self): self._reraise_exception = False self._break_pipeline_cfg() pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, publisher.calls) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) self.assertEqual('b', getattr(self.TransformerClass.samples[1], "name")) def test_none_transformer_pipeline(self): self._set_pipeline_cfg('transformers', None) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], 'name')) def test_empty_transformer_pipeline(self): self._set_pipeline_cfg('transformers', []) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], 'name')) def test_multiple_transformer_same_class(self): transformer_cfg = [ { 'name': 'update', 'parameters': {} }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with 
pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, publisher.calls) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update_update', getattr(publisher.samples[0], 'name')) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual('a_update', getattr(self.TransformerClass.samples[1], 'name')) def test_multiple_transformer_same_class_different_parameter(self): transformer_cfg = [ { 'name': 'update', 'parameters': { "append_name": "_update", } }, { 'name': 'update', 'parameters': { "append_name": "_new", } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual('a_update', getattr(self.TransformerClass.samples[1], 'name')) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) def test_multiple_transformer_drop_transformer(self): transformer_cfg = [ { 'name': 'update', 'parameters': { "append_name": "_update", } }, { 'name': 'drop', 'parameters': {} }, { 'name': 'update', 'parameters': { "append_name": "_new", } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual(1, len(self.TransformerClassDrop.samples)) self.assertEqual('a_update', getattr(self.TransformerClassDrop.samples[0], 'name')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a_update', getattr(new_publisher.samples[0], 'name')) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a_update', getattr(new_publisher.samples[0], 'name')) def test_multiple_counter_pipeline(self): self._set_pipeline_cfg('counters', ['a', 'b']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter, sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, 
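# The second sample is a clone of test_counter renamed to 'b'; with both 'a' and 'b' in the counter list, a single publish call is expected to yield two transformed samples, 'a_update' and 'b_update'.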
unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, )]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) self.assertEqual('b_update', getattr(publisher.samples[1], 'name')) def test_flush_pipeline_cache(self): CACHE_SIZE = 10 extra_transformer_cfg = [ { 'name': 'cache', 'parameters': { 'size': CACHE_SIZE, } }, { 'name': 'update', 'parameters': { 'append_name': '_new' } }, ] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, self.test_counter) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush(None) self.assertEqual(0, len(publisher.samples)) pipe.publish_data(None, self.test_counter) pipe.flush(None) self.assertEqual(0, len(publisher.samples)) for i in range(CACHE_SIZE - 2): pipe.publish_data(None, self.test_counter) pipe.flush(None) self.assertEqual(CACHE_SIZE, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) def test_flush_pipeline_cache_multiple_counter(self): CACHE_SIZE = 3 extra_transformer_cfg = [ { 'name': 'cache', 'parameters': { 'size': CACHE_SIZE } }, { 'name': 'update', 'parameters': { 'append_name': '_new' } }, ] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) self._set_pipeline_cfg('counters', ['a', 'b']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter, sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, )]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(CACHE_SIZE, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) self.assertEqual('b_update_new', getattr(publisher.samples[1], 'name')) def test_flush_pipeline_cache_before_publisher(self): extra_transformer_cfg = [{ 'name': 'cache', 'parameters': {} }] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] publisher = pipe.publishers[0] pipe.publish_data(None, self.test_counter) self.assertEqual(0, len(publisher.samples)) pipe.flush(None) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) def test_global_unit_conversion(self): scale = 'volume / ((10**6) * 60)' transformer_cfg = [ { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': scale}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['cpu']) counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=1200000000, 
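# 1200000000 ns of cumulative CPU time; the scale 'volume / ((10**6) * 60)' divides by 60000000, so the cpu_mins sample asserted below should carry volume 1200000000 / 60000000 = 20.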
unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) pipe.flush(None) self.assertEqual(1, len(publisher.samples)) cpu_mins = publisher.samples[-1] self.assertEqual('cpu_mins', getattr(cpu_mins, 'name')) self.assertEqual('min', getattr(cpu_mins, 'unit')) self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type')) self.assertEqual(20, getattr(cpu_mins, 'volume')) def test_unit_identified_source_unit_conversion(self): transformer_cfg = [ { 'name': 'unit_conversion', 'parameters': { 'source': {'unit': '°C'}, 'target': {'unit': '°F', 'scale': '(volume * 1.8) + 32'}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['core_temperature', 'ambient_temperature']) counters = [ sample.Sample( name='core_temperature', type=sample.TYPE_GAUGE, volume=36.0, unit='°C', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), sample.Sample( name='ambient_temperature', type=sample.TYPE_GAUGE, volume=88.8, unit='°F', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) core_temp = publisher.samples[0] self.assertEqual('core_temperature', getattr(core_temp, 'name')) self.assertEqual('°F', getattr(core_temp, 'unit')) self.assertEqual(96.8, getattr(core_temp, 'volume')) amb_temp = publisher.samples[1] self.assertEqual('ambient_temperature', getattr(amb_temp, 'name')) self.assertEqual('°F', getattr(amb_temp, 'unit')) self.assertEqual(88.8, getattr(amb_temp, 'volume')) self.assertEqual(96.8, getattr(core_temp, 'volume')) def _do_test_rate_of_change_conversion(self, prev, curr, type, expected, offset=1, weight=None): s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)" "* (resource_metadata.non.existent or 1.0)" "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))") transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['cpu']) now = timeutils.utcnow() later = now + datetime.timedelta(minutes=offset) um = {'autoscaling_weight': weight} if weight else {} counters = [ sample.Sample( name='cpu', type=type, volume=prev, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=prev, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource2', timestamp=now.isoformat(), resource_metadata={'cpu_number': 2, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=curr, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', 
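# Base case: 120e9 -> 180e9 ns over a one-minute offset on a 4-vCPU resource gives a rate of 1e9 ns/s, scaled by 100.0 / (10**9 * 4) to the expected 25.0 % cpu_util; the second resource has cpu_number=2, hence expected * 2.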
timestamp=later.isoformat(), resource_metadata={'cpu_number': 4, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=curr, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource2', timestamp=later.isoformat(), resource_metadata={'cpu_number': 2, 'user_metadata': um}, ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) pipe.flush(None) self.assertEqual(2, len(publisher.samples)) cpu_util = publisher.samples[0] self.assertEqual('cpu_util', getattr(cpu_util, 'name')) self.assertEqual('test_resource', getattr(cpu_util, 'resource_id')) self.assertEqual('%', getattr(cpu_util, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) self.assertEqual(expected, getattr(cpu_util, 'volume')) cpu_util = publisher.samples[1] self.assertEqual('cpu_util', getattr(cpu_util, 'name')) self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id')) self.assertEqual('%', getattr(cpu_util, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) self.assertEqual(expected * 2, getattr(cpu_util, 'volume')) def test_rate_of_change_conversion(self): self._do_test_rate_of_change_conversion(120000000000, 180000000000, sample.TYPE_CUMULATIVE, 25.0) def test_rate_of_change_conversion_weight(self): self._do_test_rate_of_change_conversion(120000000000, 180000000000, sample.TYPE_CUMULATIVE, 27.5, weight=1.1) def test_rate_of_change_conversion_negative_cumulative_delta(self): self._do_test_rate_of_change_conversion(180000000000, 120000000000, sample.TYPE_CUMULATIVE, 50.0) def test_rate_of_change_conversion_negative_gauge_delta(self): self._do_test_rate_of_change_conversion(180000000000, 120000000000, sample.TYPE_GAUGE, -25.0) def test_rate_of_change_conversion_zero_delay(self): self._do_test_rate_of_change_conversion(120000000000, 120000000000, sample.TYPE_CUMULATIVE, 0.0, offset=0) def test_rate_of_change_no_predecessor(self): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['cpu']) now = timeutils.utcnow() counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4} ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush(None) self.assertEqual(0, len(publisher.samples)) @mock.patch('ceilometer.transformer.conversions.LOG') def test_rate_of_change_out_of_order(self, the_log): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['cpu']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, 
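# The samples below are fed in the order now, earlier, later; the 'earlier' one moves backwards in time and should be dropped with a warning, leaving a single cpu_util sample from the now -> later pair: (130e9 - 125e9) ns over 10 s, scaled by 100.0 / (10**9 * 4), i.e. 12.5.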
self.transformer_manager) pipe = pipeline_manager.pipelines[0] now = timeutils.utcnow() earlier = now - datetime.timedelta(seconds=10) later = now + datetime.timedelta(seconds=10) counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=125000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=earlier.isoformat(), resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=130000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=later.isoformat(), resource_metadata={'cpu_number': 4} ), ] pipe.publish_data(None, counters) publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) pipe.flush(None) self.assertEqual(1, len(publisher.samples)) cpu_util_sample = publisher.samples[0] self.assertEqual(12.5, cpu_util_sample.volume) the_log.warning.assert_called_with( 'dropping out of time order sample: %s', (counters[1],) ) def test_resources(self): resources = ['test1://', 'test2://'] self._set_pipeline_cfg('resources', resources) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertEqual(resources, pipeline_manager.pipelines[0].resources) def test_no_resources(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertEqual(0, len(pipeline_manager.pipelines[0].resources)) def _do_test_rate_of_change_mapping(self, pipe, meters, units): now = timeutils.utcnow() base = 1000 offset = 7 rate = 42 later = now + datetime.timedelta(minutes=offset) counters = [] for v, ts in [(base, now.isoformat()), (base + (offset * 60 * rate), later.isoformat())]: for n, u, r in [(meters[0], units[0], 'resource1'), (meters[1], units[1], 'resource2')]: s = sample.Sample( name=n, type=sample.TYPE_CUMULATIVE, volume=v, unit=u, user_id='test_user', project_id='test_proj', resource_id=r, timestamp=ts, resource_metadata={}, ) counters.append(s) pipe.publish_data(None, counters) publisher = pipe.publishers[0] self.assertEqual(2, len(publisher.samples)) pipe.flush(None) self.assertEqual(2, len(publisher.samples)) bps = publisher.samples[0] self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name')) self.assertEqual('resource1', getattr(bps, 'resource_id')) self.assertEqual('%s/s' % units[0], getattr(bps, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type')) self.assertEqual(rate, getattr(bps, 'volume')) rps = publisher.samples[1] self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name')) self.assertEqual('resource2', getattr(rps, 'resource_id')) self.assertEqual('%s/s' % units[1], getattr(rps, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type')) self.assertEqual(rate, getattr(rps, 'volume')) def test_rate_of_change_mapping(self): map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)', 'unit': '(B|request)'} map_to = {'name': 'disk.\\1.\\2.rate', 'unit': '\\1/s'} transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': { 'map_from': map_from }, 'target': { 'map_to': map_to, 'type': sample.TYPE_GAUGE }, }, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['disk.read.bytes', 'disk.write.requests']) pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] meters = ('disk.read.bytes', 'disk.write.requests') units = ('B', 'request') self._do_test_rate_of_change_mapping(pipe, meters, units) def _do_test_aggregator(self, parameters, expected_length): transformer_cfg = [ { 'name': 'aggregator', 'parameters': parameters, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=53, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=42, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=15, unit='B', user_id='test_user', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=2, unit='B', user_id='test_user_bis', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '3.0'} ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(expected_length, len(publisher.samples)) return sorted(publisher.samples, key=lambda s: s.volume) def test_aggregator_meter_type(self): volumes = [1.0, 2.0, 3.0] transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': len(volumes) * len(sample.TYPES)} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['testgauge', 'testcumulative', 'testdelta']) counters = [] for sample_type in sample.TYPES: for volume in volumes: counters.append(sample.Sample( name='test' + sample_type, type=sample_type, volume=volume, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} )) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] actual = sorted(s.volume for s in publisher.samples) self.assertEqual([2.0, 3.0, 6.0], actual) def test_aggregator_metadata(self): for conf, expected_version in [('last', '2.0'), ('first', '1.0')]: samples = self._do_test_aggregator({ 'resource_metadata': conf, 
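# 'first'/'last' select whether each aggregate keeps the metadata of its oldest or newest member. The six samples from _do_test_aggregator span four distinct (user_id, project_id) pairs on one resource, hence expected_length=4 with volumes 2, 15, 26+16=42 and 53+42=95.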
'target': {'name': 'aggregated-bytes'} }, expected_length=4) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(2, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(15, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) s = samples[2] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(42, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': expected_version}, s.resource_metadata) s = samples[3] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(95, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': expected_version}, s.resource_metadata) def test_aggregator_user_last_and_metadata_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 'user_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=2) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(44, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(110, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) def test_aggregator_user_first_and_metadata_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 'user_id': 'first', 'target': {'name': 'aggregated-bytes'} }, expected_length=2) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(44, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(110, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) def test_aggregator_all_first(self): samples = self._do_test_aggregator({ 'resource_metadata': 'first', 'user_id': 'first', 'project_id': 'first', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '1.0'}, s.resource_metadata) def test_aggregator_all_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) def test_aggregator_all_mixed(self): samples = self._do_test_aggregator({ 'resource_metadata': 'drop', 'user_id': 'first', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) 
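# With user_id, project_id and resource_metadata all collapsed ('first'/'last'/'drop'), the six input samples fold into a single aggregate: 26 + 16 + 53 + 42 + 15 + 2 = 154.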
self.assertEqual(154, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({}, s.resource_metadata) def test_aggregator_metadata_default(self): samples = self._do_test_aggregator({ 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) @mock.patch('ceilometer.transformer.conversions.LOG') def test_aggregator_metadata_invalid(self, mylog): samples = self._do_test_aggregator({ 'resource_metadata': 'invalid', 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertTrue(mylog.warning.called) self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) def test_aggregator_sized_flush(self): transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 2}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ) ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, [counters[0]]) pipe.flush(None) publisher = pipe.publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.publish_data(None, [counters[1]]) pipe.flush(None) publisher = pipe.publishers[0] self.assertEqual(2, len(publisher.samples)) def test_aggregator_timed_flush(self): timeutils.set_time_override() transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 900, 'retention_time': 60}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, counters) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) timeutils.advance_time_seconds(120) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) def test_aggregator_without_authentication(self): transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 2}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['storage.objects.outgoing.bytes']) counters 
= [ sample.Sample( name='storage.objects.outgoing.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id=None, project_id=None, resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.outgoing.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id=None, project_id=None, resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ) ] pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, [counters[0]]) pipe.flush(None) publisher = pipe.publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.publish_data(None, [counters[1]]) pipe.flush(None) publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(42, getattr(publisher.samples[0], 'volume')) self.assertEqual("test_resource", getattr(publisher.samples[0], 'resource_id')) def test_aggregator_to_rate_of_change_transformer_two_resources(self): resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4', '5dd418a6-c6a9-49c9-9cef-b357d72c71dd'] aggregator = conversions.AggregatorTransformer(size="2", timestamp="last") rate_of_change_transformer = conversions.RateOfChangeTransformer() counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00') for offset in range(2): counter = copy.copy(self.test_counter) counter.timestamp = timeutils.isotime(counter_time) counter.resource_id = resource_id[0] counter.volume = offset counter.type = sample.TYPE_CUMULATIVE counter.unit = 'ns' aggregator.handle_sample(context.get_admin_context(), counter) if offset == 1: test_time = counter_time counter_time = counter_time + datetime.timedelta(0, 1) aggregated_counters = aggregator.flush(context.get_admin_context()) self.assertEqual(len(aggregated_counters), 1) self.assertEqual(aggregated_counters[0].timestamp, timeutils.isotime(test_time)) rate_of_change_transformer.handle_sample(context.get_admin_context(), aggregated_counters[0]) for offset in range(2): counter = copy.copy(self.test_counter) counter.timestamp = timeutils.isotime(counter_time) counter.resource_id = resource_id[offset] counter.volume = 2 counter.type = sample.TYPE_CUMULATIVE counter.unit = 'ns' aggregator.handle_sample(context.get_admin_context(), counter) if offset == 0: test_time = counter_time counter_time = counter_time + datetime.timedelta(0, 1) aggregated_counters = aggregator.flush(context.get_admin_context()) self.assertEqual(len(aggregated_counters), 2) for counter in aggregated_counters: if counter.resource_id == resource_id[0]: rateOfChange = rate_of_change_transformer.handle_sample( context.get_admin_context(), counter) self.assertEqual(counter.timestamp, timeutils.isotime(test_time)) self.assertEqual(rateOfChange.volume, 1) def _do_test_arithmetic_expr_parse(self, expr, expected): actual = arithmetic.ArithmeticTransformer.parse_expr(expr) self.assertEqual(expected, actual) def test_arithmetic_expr_parse(self): expr = '$(cpu) + $(cpu.util)' expected = ('cpu.volume + _cpu_util_ESC.volume', { 'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_parameter(self): expr = '$(cpu) + $(cpu.util).resource_metadata' expected = ('cpu.volume + _cpu_util_ESC.resource_metadata', { 'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_reserved_keyword(self): expr = 
'$(class) + $(cpu.util)' expected = ('_class_ESC.volume + _cpu_util_ESC.volume', { 'class': '_class_ESC', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_already_escaped(self): expr = '$(class) + $(_class_ESC)' expected = ('_class_ESC.volume + __class_ESC_ESC.volume', { 'class': '_class_ESC', '_class_ESC': '__class_ESC_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def _do_test_arithmetic(self, expression, scenario, expected): transformer_cfg = [ { 'name': 'arithmetic', 'parameters': { 'target': {'name': 'new_meter', 'unit': '%', 'type': sample.TYPE_GAUGE, 'expr': expression}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', list(set(s['name'] for s in scenario))) counters = [] test_resources = ['test_resource1', 'test_resource2'] for resource_id in test_resources: for s in scenario: counters.append(sample.Sample( name=s['name'], type=sample.TYPE_CUMULATIVE, volume=s['volume'], unit='ns', user_id='test_user', project_id='test_proj', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=s.get('metadata') )) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] for s in counters: pipe.publish_data(None, s) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] expected_len = len(test_resources) * len(expected) self.assertEqual(expected_len, len(publisher.samples)) # bucket samples by resource first samples_by_resource = dict((r, []) for r in test_resources) for s in publisher.samples: samples_by_resource[s.resource_id].append(s) for resource_id in samples_by_resource: self.assertEqual(len(expected), len(samples_by_resource[resource_id])) for i, s in enumerate(samples_by_resource[resource_id]): self.assertEqual('new_meter', getattr(s, 'name')) self.assertEqual(resource_id, getattr(s, 'resource_id')) self.assertEqual('%', getattr(s, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type')) self.assertEqual(expected[i], getattr(s, 'volume')) def test_arithmetic_transformer(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [50.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_expr_empty(self): expression = '' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_expr_misconfigured(self): expression = '512.0 * 3' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_nan(self): expression = 'float(\'nan\') * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_exception(self): expression = '$(memory) / 0' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_multiple_samples(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=2048.0), dict(name='memory.usage', 
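# Expected result: 25.0 = 100.0 * 512 / 2048. The memory.usage sample pairs with the memory sample already seen; the trailing memory=1024.0 sample never gets a matching usage sample, so it produces nothing.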
volume=512.0), dict(name='memory', volume=1024.0), ] expected = [25.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_missing(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [dict(name='memory.usage', volume=512.0)] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_more_than_needed(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), dict(name='cpu_util', volume=90.0), ] expected = [50.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_cache_cleared(self): transformer_cfg = [ { 'name': 'arithmetic', 'parameters': { 'target': {'name': 'new_meter', 'expr': '$(memory.usage) + 2'} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['memory.usage']) counter = sample.Sample( name='memory.usage', type=sample.TYPE_GAUGE, volume=1024.0, unit='MB', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata=None ) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, [counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush(None) self.assertEqual(1, len(publisher.samples)) self.assertEqual(1026.0, publisher.samples[0].volume) pipe.flush(None) self.assertEqual(1, len(publisher.samples)) counter.volume = 2048.0 pipe.publish_data(None, [counter]) pipe.flush(None) self.assertEqual(2, len(publisher.samples)) self.assertEqual(2050.0, publisher.samples[1].volume) def test_aggregator_timed_flush_no_matching_samples(self): timeutils.set_time_override() transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 900, 'retention_time': 60}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['unrelated-sample']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) timeutils.advance_time_seconds(200) pipe = pipeline_manager.pipelines[0] pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) def _do_test_delta(self, data, expected, growth_only=False): transformer_cfg = [ { 'name': 'delta', 'parameters': { 'target': {'name': 'new_meter'}, 'growth_only': growth_only, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('counters', ['cpu']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] pipe.publish_data(None, data) pipe.flush(None) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(expected, len(publisher.samples)) return publisher.samples def test_delta_transformer(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, 
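# Deltas are computed per resource in arrival order: 16 - 26 = -10, then 53 - 16 = 37; the growth_only variant further below keeps only the positive 37.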
unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 2) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata) self.assertEqual(-10, deltas[0].volume) self.assertEqual('new_meter', deltas[1].name) self.assertEqual('delta', deltas[1].type) self.assertEqual('ns', deltas[1].unit) self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata) self.assertEqual(37, deltas[1].volume) def test_delta_transformer_out_of_order(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=((timeutils.utcnow() - datetime.timedelta(minutes=5)) .isoformat()), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 1) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) self.assertEqual(27, deltas[0].volume) def test_delta_transformer_growth_only(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 1, True) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) self.assertEqual(37, deltas[0].volume) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() def test_get_pipeline_grouping_key(self): transformer_cfg = [ { 'name': 'update', 'parameters': {} }, { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertEqual(set(['resource_id', 'counter_name']), set(pipeline.get_pipeline_grouping_key( pipeline_manager.pipelines[0]))) def test_get_pipeline_duplicate_grouping_key(self): transformer_cfg = [ { 'name': 
'update', 'parameters': {} }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertEqual(['counter_name'], pipeline.get_pipeline_grouping_key( pipeline_manager.pipelines[0])) ceilometer-6.1.5/ceilometer/tests/unit/0000775000567000056710000000000013072745164021276 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/image/0000775000567000056710000000000013072745164022360 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/image/test_glance.py0000664000567000056710000002022313072744706025222 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as fixture_config from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.image import glance IMAGE_LIST = [ type('Image', (object,), {u'status': u'queued', u'name': "some name", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:29:46', u'disk_format': None, u'updated_at': u'2012-09-18T16:29:46', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', u'location': None, u'checksum': None, u'owner': u'4c8364fc20184ed7971b76602aa96184', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 2048}), type('Image', (object,), {u'status': u'active', u'name': "hello world", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:27:41', u'disk_format': None, u'updated_at': u'2012-09-18T16:27:41', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'22be9f90-864d-494c-aa74-8035fd535989', u'location': None, u'checksum': None, u'owner': u'9e4f98287a0246daa42eaf4025db99d4', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 0}), type('Image', (object,), {u'status': u'queued', u'name': None, u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:23:27', u'disk_format': "raw", u'updated_at': u'2012-09-18T16:23:27', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', u'location': None, u'checksum': None, u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 1024}), type('Image', (object,), {u'status': u'queued', u'name': "some name", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:29:46', u'disk_format': None, u'updated_at': u'2012-09-18T16:29:46', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', u'location': None, u'checksum': None, u'owner': u'4c8364fc20184ed7971b76602aa96184', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 2048}), ] ENDPOINT = 'end://point' class _BaseObject(object): pass class FakeGlanceClient(object): 
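# Minimal stand-in for the real glance client: the page-size tests only need an images.list attribute they can swap for a MagicMock.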
class images(object): pass class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() access = self._keystone.session.auth.get_access.return_value access.service_catalog.get_endpoints = mock.Mock( return_value={'image': mock.ANY}) class TestImagePollsterPageSize(base.BaseTestCase): @staticmethod def fake_get_glance_client(ksclient, endpoint): glanceclient = FakeGlanceClient() glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) return glanceclient @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestImagePollsterPageSize, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() self.useFixture(mockpatch.PatchObject( glance._Base, 'get_glance_client', side_effect=self.fake_get_glance_client)) self.CONF = self.useFixture(fixture_config.Config()).conf def _do_test_iter_images(self, page_size=0, length=0): self.CONF.set_override("glance_page_size", page_size) images = list(glance.ImagePollster(). _iter_images(self.manager.keystone, {}, ENDPOINT)) kwargs = {} if page_size > 0: kwargs['page_size'] = page_size FakeGlanceClient.images.list.assert_called_with( filters={'is_public': None}, **kwargs) self.assertEqual(length, len(images)) def test_page_size(self): self._do_test_iter_images(100, 4) def test_page_size_default(self): self._do_test_iter_images(length=4) def test_page_size_negative_number(self): self._do_test_iter_images(-1, 4) class TestImagePollster(base.BaseTestCase): @staticmethod def fake_get_glance_client(ksclient, endpoint): glanceclient = _BaseObject() setattr(glanceclient, "images", _BaseObject()) setattr(glanceclient.images, "list", lambda *args, **kwargs: iter(IMAGE_LIST)) return glanceclient @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestImagePollster, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() self.useFixture(mockpatch.PatchObject( glance._Base, 'get_glance_client', side_effect=self.fake_get_glance_client)) def test_default_discovery(self): pollster = glance.ImagePollster() self.assertEqual('endpoint:image', pollster.default_discovery) def test_iter_images(self): # Tests whether the iter_images method returns a unique image # list when there is nothing in the cache images = list(glance.ImagePollster(). _iter_images(self.manager.keystone, {}, ENDPOINT)) self.assertEqual(len(set(image.id for image in images)), len(images)) def test_iter_images_cached(self): # Tests whether the iter_images method returns the values from # the cache cache = {'%s-images' % ENDPOINT: []} images = list(glance.ImagePollster(). 
_iter_images(self.manager.keystone, cache, ENDPOINT)) self.assertEqual([], images) def test_image(self): samples = list(glance.ImagePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(4, len(samples)) for sample in samples: self.assertEqual(1, sample.volume) def test_image_size(self): samples = list(glance.ImageSizePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(4, len(samples)) for image in IMAGE_LIST: self.assertTrue( any(map(lambda sample: sample.volume == image.size, samples))) def test_image_get_sample_names(self): samples = list(glance.ImagePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(set(['image']), set([s.name for s in samples])) def test_image_size_get_sample_names(self): samples = list(glance.ImageSizePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(set(['image.size']), set([s.name for s in samples])) ceilometer-6.1.5/ceilometer/tests/unit/image/__init__.py0000664000567000056710000000000013072744703024455 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/meter/0000775000567000056710000000000013072745164022412 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/meter/test_meter_plugins.py0000664000567000056710000000617313072744703026705 0ustar jenkinsjenkins00000000000000# # Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
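# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not ceilometer source): the glance
# pollster tests above rely on the image list being memoised in the polling
# cache under a per-endpoint key ('<endpoint>-images'), which is why a
# pre-seeded empty entry short-circuits the API call entirely.  The helper
# below is a minimal stand-in for that pattern; `iter_images_cached` and
# `fetch_images` are hypothetical names, not the real _iter_images().
def iter_images_cached(cache, endpoint, fetch_images):
    """Return the image list for `endpoint`, fetching it at most once."""
    key = '%s-images' % endpoint
    if key not in cache:
        # Only the first caller for this endpoint pays for the API call;
        # later pollsters in the same polling cycle reuse the cached list.
        cache[key] = list(fetch_images(endpoint))
    return iter(cache[key])

# Mirrors test_iter_images_cached(): a pre-seeded (empty) entry is returned
# as-is, so fetch_images is never invoked.
_cache = {'end://point-images': []}
assert list(iter_images_cached(_cache, 'end://point', None)) == []
# ---------------------------------------------------------------------------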
import mock from oslotest import base from ceilometer.event import trait_plugins class TestTimedeltaPlugin(base.BaseTestCase): def setUp(self): super(TestTimedeltaPlugin, self).setUp() self.plugin = trait_plugins.TimedeltaPlugin() def test_timedelta_transformation(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32')] value = self.plugin.trait_value(match_list) self.assertEqual(3600, value) def test_timedelta_missing_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_exceed_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32'), ('test.timestamp3', '2016-03-02T16:10:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_invalid_timestamp(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T15:004:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) msg = log.warning._mock_call_args[0][0] self.assertTrue(msg.startswith('Failed to parse date from set ' 'fields, both fields ') ) def test_timedelta_reverse_timestamp_order(self): match_list = [('test.timestamp1', '2016-03-02T15:15:32'), ('test.timestamp2', '2016-03-02T15:10:32')] value = self.plugin.trait_value(match_list) self.assertEqual(300, value) def test_timedelta_precise_difference(self): match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'), ('test.timestamp2', '2016-03-02T15:10:32.786899')] value = self.plugin.trait_value(match_list) self.assertEqual(0.000006, value) ceilometer-6.1.5/ceilometer/tests/unit/meter/__init__.py0000664000567000056710000000000013072744703024507 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/meter/test_notifications.py0000664000567000056710000007673513072744706026717 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
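# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not the TimedeltaPlugin source): the
# tests above pin down the behaviour of the timedelta trait plugin -- exactly
# two ISO-8601 fields, order-insensitive, sub-second precision.  A minimal
# equivalent computation is sketched here; `timedelta_seconds` and
# `_parse_isotime` are hypothetical helper names.
from datetime import datetime

_ISO_FORMATS = ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S')

def _parse_isotime(text):
    for fmt in _ISO_FORMATS:
        try:
            return datetime.strptime(text, fmt)
        except ValueError:
            continue
    raise ValueError('Failed to parse date from set fields: %r' % text)

def timedelta_seconds(first, second):
    # abs() makes the result order-insensitive, matching
    # test_timedelta_reverse_timestamp_order above.
    return abs((_parse_isotime(second) -
                _parse_isotime(first)).total_seconds())

assert timedelta_seconds('2016-03-02T15:04:32', '2016-03-02T16:04:32') == 3600.0
assert timedelta_seconds('2016-03-02T15:15:32', '2016-03-02T15:10:32') == 300.0
# ---------------------------------------------------------------------------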
"""Tests for ceilometer.meter.notifications """ import copy import mock import os import six import yaml from oslo_config import fixture as fixture_config from oslo_utils import encodeutils from oslo_utils import fileutils import ceilometer from ceilometer import declarative from ceilometer.meter import notifications from ceilometer import service as ceilometer_service from ceilometer.tests import base as test NOTIFICATION = { 'event_type': u'test.create', 'timestamp': u'2015-06-1909: 19: 35.786893', 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', u'timestamp': u'2015-06-19T09:19:35.785330', u'created_at': u'2015-06-19T09:25:35.785330', u'launched_at': u'2015-06-19T09:25:40.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', }, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', 'publisher_id': "foo123" } MIDDLEWARE_EVENT = { u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'_context_quota_class': None, u'event_type': u'objectstore.http.request', u'_context_service_catalog': [], u'_context_auth_token': None, u'_context_user_id': None, u'priority': u'INFO', u'_context_is_admin': True, u'_context_user': None, u'publisher_id': u'ceilometermiddleware', u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'_context_remote_address': None, u'_context_roles': [], u'timestamp': u'2013-07-29 06:51:34.474815', u'_context_timestamp': u'2013-07-29T06:51:34.348091', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', u'_context_project_name': None, u'_context_read_deleted': u'no', u'_context_tenant': None, u'_context_instance_lock_checked': False, u'_context_project_id': None, u'_context_user_name': None, u'payload': { 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', 'eventTime': '2015-01-30T16: 38: 43.233621', 'target': { 'action': 'get', 'typeURI': 'service/storage/object', 'id': 'account', 'metadata': { 'path': '/1.0/CUSTOM_account/container/obj', 'version': '1.0', 'container': 'container', 'object': 'obj' } }, 'observer': { 'id': 'target' }, 'eventType': 'activity', 'measurements': [ { 'metric': { 'metricId': 'openstack: uuid', 'name': 'storage.objects.outgoing.bytes', 'unit': 'B' }, 'result': 28 }, { 'metric': { 'metricId': 'openstack: uuid2', 'name': 'storage.objects.incoming.bytes', 'unit': 'B' }, 'result': 1 } ], 'initiator': { 'typeURI': 'service/security/account/user', 'project_id': None, 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' }, 'action': 'read', 'outcome': 'success', 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' } } FULL_MULTI_MSG = { u'_context_domain': None, u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'event_type': u'full.sample', 'timestamp': u'2015-06-1909: 19: 35.786893', u'_context_auth_token': None, u'_context_read_only': False, 'payload': [{ u'counter_name': u'instance1', u'user_id': u'user1', u'resource_id': u'res1', u'counter_unit': u'ns', u'counter_volume': 28.0, u'project_id': u'proj1', u'counter_type': u'gauge' }, { u'counter_name': u'instance2', u'user_id': u'user2', u'resource_id': u'res2', u'counter_unit': u'%', u'counter_volume': 1.0, u'project_id': u'proj2', u'counter_type': 
u'delta' }], u'_context_resource_uuid': None, u'_context_user_identity': u'fake_user_identity---', u'_context_show_deleted': False, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', 'priority': 'info', u'_context_is_admin': True, u'_context_project_domain': None, u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', u'_context_user_domain': None, 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } METRICS_UPDATE = { u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'_context_quota_class': None, u'event_type': u'compute.metrics.update', u'_context_service_catalog': [], u'_context_auth_token': None, u'_context_user_id': None, u'payload': { u'metrics': [ {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.frequency', 'value': 1600, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.time', 'value': 17421440000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.time', 'value': 7852600000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.time', 'value': 1307374400000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.time', 'value': 11697470000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.percent', 'value': 0.012959045637294348, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.percent', 'value': 0.027501485834103515, 'source': 'libvirt.LibvirtDriver'}], u'nodename': u'tianst.sh.intel.com', u'host': u'tianst', u'host_id': u'10.0.1.1'}, u'priority': u'INFO', u'_context_is_admin': True, u'_context_user': None, u'publisher_id': u'compute.tianst.sh.intel.com', u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'_context_remote_address': None, u'_context_roles': [], u'timestamp': u'2013-07-29 06:51:34.474815', u'_context_timestamp': u'2013-07-29T06:51:34.348091', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', u'_context_project_name': None, u'_context_read_deleted': u'no', u'_context_tenant': None, u'_context_instance_lock_checked': False, u'_context_project_id': None, u'_context_user_name': None } class TestMeterDefinition(test.BaseTestCase): def test_config_definition(self): cfg = dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id") handler = notifications.MeterDefinition(cfg, mock.Mock()) self.assertTrue(handler.match_type("test.create")) sample = list(handler.to_samples(NOTIFICATION))[0] self.assertEqual(1.0, sample["volume"]) self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", sample["resource_id"]) self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", sample["project_id"]) def test_config_required_missing_fields(self): cfg = dict() try: notifications.MeterDefinition(cfg, mock.Mock()) except declarative.DefinitionException as e: self.assertEqual("Required fields ['name', 'type', 
'event_type'," " 'unit', 'volume', 'resource_id']" " not specified", encodeutils.exception_to_unicode(e)) def test_bad_type_cfg_definition(self): cfg = dict(name="test", type="foo", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23") try: notifications.MeterDefinition(cfg, mock.Mock()) except declarative.DefinitionException as e: self.assertEqual("Invalid type foo specified", encodeutils.exception_to_unicode(e)) class TestMeterProcessing(test.BaseTestCase): def setUp(self): super(TestMeterProcessing, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf ceilometer_service.prepare_service(argv=[], config_files=[]) self.handler = notifications.ProcessMeterNotifications(mock.Mock()) def test_fallback_meter_path(self): self.CONF.set_override('meter_definitions_cfg_file', '/not/existing/path', group='meter') with mock.patch('ceilometer.declarative.open', mock.mock_open(read_data='---\nmetric: []'), create=True) as mock_open: self.handler._load_definitions() if six.PY3: path = os.path.dirname(ceilometer.__file__) else: path = "ceilometer" mock_open.assert_called_with(path + "/meter/data/meters.yaml") def _load_meter_def_file(self, cfg): if six.PY3: cfg = cfg.encode('utf-8') meter_cfg_file = fileutils.write_to_tempfile(content=cfg, prefix="meters", suffix="yaml") self.CONF.set_override('meter_definitions_cfg_file', meter_cfg_file, group='meter') self.handler.definitions = self.handler._load_definitions() @mock.patch('ceilometer.meter.notifications.LOG') def test_bad_meter_definition_skip(self, LOG): cfg = yaml.dump( {'metric': [dict(name="good_test_1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test_2", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23"), dict(name="good_test_3", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) self.assertEqual(2, len(self.handler.definitions)) LOG.error.assert_called_with( "Error loading meter definition : " "Invalid type bad_type specified") def test_jsonpath_values_parsed(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('test1', s1['name']) self.assertEqual(1.0, s1['volume']) self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) def test_multiple_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) data = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) def test_unmatched_meter(self): cfg = yaml.dump( 
{'metric': [dict(name="test1", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(0, len(c)) def test_regex_match_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) def test_default_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['timestamp'], s1['timestamp']) def test_custom_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name", timestamp='$.payload.eventTime')]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], s1['timestamp']) def test_custom_timestamp_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename", timestamp="$.payload.metrics" "[?(@.name='cpu.frequency')].timestamp")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual("2013-07-29T06:51:34.472416", s1['timestamp']) def test_default_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = NOTIFICATION['payload'].copy() meta['host'] = NOTIFICATION['publisher_id'] meta['event_type'] = NOTIFICATION['event_type'] self.assertEqual(meta, s1['resource_metadata']) def test_datetime_plugin(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="gauge", unit="sec", volume={"fields": ["$.payload.created_at", "$.payload.launched_at"], "plugin": "timedelta"}, resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = 
c[0].as_dict() self.assertEqual(5.0, s1['volume']) def test_custom_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", metadata={'proj': '$.payload.project_id', 'dict': '$.payload.resource_metadata'})]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'proj': s1['project_id'], 'dict': NOTIFICATION['payload']['resource_metadata']} self.assertEqual(meta, s1['resource_metadata']) def test_multi_match_event_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(2, len(c)) def test_multi_meter_payload(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "volume", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(MIDDLEWARE_EVENT)) self.assertEqual(2, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) s2 = c[1].as_dict() self.assertEqual('storage.objects.incoming.bytes', s2['name']) self.assertEqual(1, s2['volume']) self.assertEqual('B', s2['unit']) def test_multi_meter_payload_single(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) def test_multi_meter_payload_none(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) def test_multi_meter_payload_all_multi(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.[*].counter_name", event_type="full.sample", type="$.payload.[*].counter_type", unit="$.payload.[*].counter_unit", volume="$.payload.[*].counter_volume", resource_id="$.payload.[*].resource_id", 
project_id="$.payload.[*].project_id", user_id="$.payload.[*].user_id", lookup=['name', 'type', 'unit', 'volume', 'resource_id', 'project_id', 'user_id'])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(FULL_MULTI_MSG)) self.assertEqual(2, len(c)) msg = FULL_MULTI_MSG['payload'] for idx, val in enumerate(c): s1 = val.as_dict() self.assertEqual(msg[idx]['counter_name'], s1['name']) self.assertEqual(msg[idx]['counter_volume'], s1['volume']) self.assertEqual(msg[idx]['counter_unit'], s1['unit']) self.assertEqual(msg[idx]['counter_type'], s1['type']) self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) self.assertEqual(msg[idx]['project_id'], s1['project_id']) self.assertEqual(msg[idx]['user_id'], s1['user_id']) @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_missing(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] del event['payload']['measurements'][1]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 0 fetched meters contain ' '"volume" field instead of 2.') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_short(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 1 fetched meters contain ' '"volume" field instead of 2.') def test_arithmetic_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.percent', event_type="compute.metrics.update", type='gauge', unit="percent", volume="$.payload.metrics[" "?(@.name='cpu.percent')].value" " * 100", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.percent', s1['name']) self.assertEqual(2.7501485834103514, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_string_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def 
test_prefix_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("prefix-tianst.sh.intel.com", s1['resource_id']) def test_duplicate_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) ceilometer-6.1.5/ceilometer/tests/unit/test_event_pipeline.py0000664000567000056710000004211413072744706025720 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
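# ---------------------------------------------------------------------------
# Editor's note: every meter-definition test above builds its configuration
# in Python via yaml.dump() and writes it to a temp file.  For reference, the
# equivalent on-disk meters.yaml snippet for the simple "test1" definition
# would look roughly like the literal below (illustrative only -- the field
# values are taken verbatim from the test fixtures above).
import yaml

_EXAMPLE_METERS_YAML = """
metric:
  - name: test1
    event_type: test.create
    type: delta
    unit: B
    volume: $.payload.volume
    resource_id: $.payload.resource_id
    project_id: $.payload.project_id
"""

# Round-trips to the same structure the tests pass to yaml.dump().
_parsed = yaml.safe_load(_EXAMPLE_METERS_YAML)
assert _parsed['metric'][0] == dict(
    name='test1', event_type='test.create', type='delta', unit='B',
    volume='$.payload.volume', resource_id='$.payload.resource_id',
    project_id='$.payload.project_id')
# ---------------------------------------------------------------------------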
import datetime import traceback import uuid import mock from oslo_config import fixture as fixture_config import oslo_messaging from oslotest import base from oslotest import mockpatch from ceilometer.event.storage import models from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer.publisher import utils class EventPipelineTestCase(base.BaseTestCase): def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](url) class PublisherClassException(publisher.PublisherBase): def publish_samples(self, ctxt, samples): pass def publish_events(self, ctxt, events): raise Exception() def setUp(self): super(EventPipelineTestCase, self).setUp() self.p_type = pipeline.EVENT_TYPE self.transformer_manager = None self.test_event = models.Event( message_id=uuid.uuid4(), event_type='a', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'started'} ) self.test_event2 = models.Event( message_id=uuid.uuid4(), event_type='b', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'stopped'} ) self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(mockpatch.Patch( 'ceilometer.pipeline.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, msg): if self._reraise_exception: raise Exception(traceback.format_exc()) def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" source = {'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new://'], }) def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del 
self.pipeline_cfg['sinks'][0][field] def _exception_create_pipelinemanager(self): self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager, self.p_type) def test_no_events(self): self._unset_pipeline_cfg('events') self._exception_create_pipelinemanager() def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_name(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) for pipe in pipeline_manager.pipelines: self.assertTrue(pipe.name.startswith('event:')) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_events_include_exclude_same(self): event_cfg = ['a', '!a'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_include_exclude(self): event_cfg = ['a', '!b'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_wildcard_included(self): event_cfg = ['a', '*'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_multiple_included_events(self): event_cfg = ['a', 'b'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) with pipeline_manager.publisher(None) as p: p([self.test_event2]) self.assertEqual(2, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) self.assertEqual('b', getattr(publisher.events[1], 'event_type')) def test_event_non_match(self): event_cfg = ['nomatch'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.events)) self.assertEqual(0, publisher.calls) def test_wildcard_event(self): event_cfg = ['*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_wildcard_excluded_events(self): event_cfg = ['*', '!a'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) def test_wildcard_excluded_events_not_excluded(self): event_cfg = ['*', '!b'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def 
test_all_excluded_events_not_excluded(self): event_cfg = ['!b', '!c'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_excluded(self): event_cfg = ['!a', '!c'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) self.assertTrue(pipeline_manager.pipelines[0].support_event('b')) self.assertFalse(pipeline_manager.pipelines[0].support_event('c')) def test_wildcard_and_excluded_wildcard_events(self): event_cfg = ['*', '!compute.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertTrue(pipeline_manager.pipelines[0]. support_event('identity.user.create')) def test_included_event_and_wildcard_events(self): event_cfg = ['compute.instance.create.start', 'identity.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertTrue(pipeline_manager.pipelines[0]. support_event('identity.user.create')) self.assertTrue(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.stop')) def test_excluded_event_and_excluded_wildcard_events(self): event_cfg = ['!compute.instance.create.start', '!identity.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0]. support_event('identity.user.create')) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertTrue(pipeline_manager.pipelines[0]. 
support_event('compute.instance.create.stop')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event, self.test_event2]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.events)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, len(new_publisher.events)) self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() def test_event_pipeline_endpoint_requeue_on_failure(self): self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([]) self.CONF.set_override("ack_on_event_error", False, group="notification") self.CONF.set_override("telemetry_secret", "not-so-secret", group="publisher") test_data = { 'message_id': uuid.uuid4(), 'event_type': 'a', 'generated': '2013-08-08 21:06:37.803826', 'traits': [ {'name': 't_text', 'value': 1, 'dtype': 'text_trait' } ], 'raw': {'status': 'started'} } message_sign = utils.compute_signature(test_data, 'not-so-secret') test_data['message_signature'] = message_sign fake_publisher = mock.Mock() self.useFixture(mockpatch.Patch( 'ceilometer.publisher.test.TestPublisher', return_value=fake_publisher)) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) event_pipeline_endpoint = pipeline.EventPipelineEndpoint( mock.Mock(), pipeline_manager.pipelines[0]) fake_publisher.publish_events.side_effect = Exception ret = event_pipeline_endpoint.sample([ {'ctxt': {}, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'a', 'payload': [test_data], 'metadata': {}}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) ceilometer-6.1.5/ceilometer/tests/unit/compute/0000775000567000056710000000000013072745164022752 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/0000775000567000056710000000000013072745164025001 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_memory.py0000664000567000056710000001101713072744706027723 0ustar jenkinsjenkins00000000000000# 
Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import memory from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.tests.unit.compute.pollsters import base class TestMemoryPollster(base.TestPollsterBase): def setUp(self): super(TestMemoryPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.MemoryUsageStats(usage=1.0), virt_inspector.MemoryUsageStats(usage=2.0), virt_inspector.NoDataException(), virt_inspector.InstanceShutOffException(), )) def inspect_memory_usage(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.MemoryUsageStats): return value else: raise value self.inspector.inspect_memory_usage = mock.Mock( side_effect=inspect_memory_usage) mgr = manager.AgentManager() pollster = memory.MemoryUsagePollster() @mock.patch('ceilometer.compute.pollsters.memory.LOG') def _verify_memory_metering(expected_count, expected_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.usage']), set([s.name for s in samples])) self.assertEqual(expected_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_memory_metering(1, 1.0, 0) _verify_memory_metering(1, 2.0, 0) _verify_memory_metering(0, 0, 1) _verify_memory_metering(0, 0, 0) class TestResidentMemoryPollster(base.TestPollsterBase): def setUp(self): super(TestResidentMemoryPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.MemoryResidentStats(resident=1.0), virt_inspector.MemoryResidentStats(resident=2.0), virt_inspector.NoDataException(), virt_inspector.InstanceShutOffException(), )) def inspect_memory_resident(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.MemoryResidentStats): return value else: raise value self.inspector.inspect_memory_resident = mock.Mock( side_effect=inspect_memory_resident) mgr = manager.AgentManager() pollster = memory.MemoryResidentPollster() @mock.patch('ceilometer.compute.pollsters.memory.LOG') def _verify_resident_memory_metering(expected_count, expected_resident_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.resident']), set([s.name for s in samples])) self.assertEqual(expected_resident_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_resident_memory_metering(1, 1.0, 0) _verify_resident_memory_metering(1, 2.0, 
0)
        _verify_resident_memory_metering(0, 0, 1)
        _verify_resident_memory_metering(0, 0, 0)
ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/base.py0000664000567000056710000000415313072744706026271 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 eNovance
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import mockpatch

import ceilometer.tests.base as base


class TestPollsterBase(base.BaseTestCase):

    def setUp(self):
        super(TestPollsterBase, self).setUp()
        self.inspector = mock.Mock()
        self.instance = mock.MagicMock()
        self.instance.name = 'instance-00000001'
        setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
                self.instance.name)
        setattr(self.instance, 'OS-EXT-STS:vm_state', 'active')
        self.instance.id = 1
        self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
                                'ram': 512, 'disk': 20, 'ephemeral': 0}
        self.instance.status = 'active'
        self.instance.metadata = {
            'fqdn': 'vm_fqdn',
            'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128',
            'project_cos': 'dev'}

        patch_virt = mockpatch.Patch(
            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
            new=mock.Mock(return_value=self.inspector))
        self.useFixture(patch_virt)

        # The hypervisor inspector is a lazily-created singleton held as a
        # class property on the base compute pollster, so that property has
        # to be patched as well to keep inspector state from leaking between
        # tests.
        patch_inspector = mockpatch.Patch(
            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
            self.inspector)
        self.useFixture(patch_inspector)
ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_diskio.py0000664000567000056710000003600313072744706027677 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 eNovance
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
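# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not ceilometer source): the aggregate
# disk samples asserted below are plain sums of the per-device inspector
# statistics (e.g. read_requests 2 + 3 == 5 across vda1/vda2), while the
# per-device pollsters emit one sample per '<instance>-<device>' resource.
# `aggregate_disk_stat` is a hypothetical helper using namedtuple stand-ins
# for the inspector types.
import collections

_Disk = collections.namedtuple('Disk', ['device'])
_DiskStats = collections.namedtuple(
    'DiskStats',
    ['read_bytes', 'read_requests', 'write_bytes', 'write_requests',
     'errors'])

def aggregate_disk_stat(disks, field):
    """Sum one statistic across every device of a single instance."""
    per_device = dict((d.device, getattr(stats, field))
                      for d, stats in disks)
    return sum(per_device.values()), sorted(per_device)

_DISKS = [
    (_Disk('vda1'), _DiskStats(1, 2, 3, 4, -1)),
    (_Disk('vda2'), _DiskStats(2, 3, 5, 7, -1)),
]
# Matches test_disk_read_requests below: volume 5, devices ['vda1', 'vda2'].
assert aggregate_disk_stat(_DISKS, 'read_requests') == (5, ['vda1', 'vda2'])
# ---------------------------------------------------------------------------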
import mock
from oslotest import mockpatch

from ceilometer.agent import manager
from ceilometer.compute.pollsters import disk
from ceilometer.compute.virt import inspector as virt_inspector
import ceilometer.tests.base as base


class TestBaseDiskIO(base.BaseTestCase):

    TYPE = 'cumulative'

    def setUp(self):
        super(TestBaseDiskIO, self).setUp()
        self.inspector = mock.Mock()
        self.instance = self._get_fake_instances()
        patch_virt = mockpatch.Patch(
            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
            new=mock.Mock(return_value=self.inspector))
        self.useFixture(patch_virt)

        # The hypervisor inspector is a lazily-created singleton held as a
        # class property on the base compute pollster, so that property has
        # to be patched as well to keep inspector state from leaking between
        # tests.
        patch_inspector = mockpatch.Patch(
            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
            self.inspector)
        self.useFixture(patch_inspector)

    @staticmethod
    def _get_fake_instances():
        instances = []
        for i in [1, 2]:
            instance = mock.MagicMock()
            instance.name = 'instance-%s' % i
            setattr(instance, 'OS-EXT-SRV-ATTR:instance_name',
                    instance.name)
            instance.id = i
            instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
                               'ram': 512, 'disk': 20, 'ephemeral': 0}
            instance.status = 'active'
            instances.append(instance)
        return instances

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, name, expected_count=2):
        pollster = factory()
        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.instance))
        self.assertIsNotEmpty(samples)
        cache_key = getattr(pollster, self.CACHE_KEY)
        self.assertIn(cache_key, cache)
        for instance in self.instance:
            self.assertIn(instance.id, cache[cache_key])
        self.assertEqual(set([name]), set([s.name for s in samples]))
        match = [s for s in samples if s.name == name]
        self.assertEqual(len(match), expected_count,
                         'missing counter %s' % name)
        return match

    def _check_aggregate_samples(self, factory, name, expected_volume,
                                 expected_device=None):
        match = self._check_get_samples(factory, name)
        self.assertEqual(expected_volume, match[0].volume)
        self.assertEqual(self.TYPE, match[0].type)
        if expected_device is not None:
            self.assertEqual(set(expected_device),
                             set(match[0].resource_metadata.get('device')))
        instances = [i.id for i in self.instance]
        for m in match:
            self.assertIn(m.resource_id, instances)

    def _check_per_device_samples(self, factory, name, expected_volume,
                                  expected_device=None):
        match = self._check_get_samples(factory, name, expected_count=4)
        match_dict = {}
        for m in match:
            match_dict[m.resource_id] = m
        for instance in self.instance:
            key = "%s-%s" % (instance.id, expected_device)
            self.assertEqual(expected_volume, match_dict[key].volume)
            self.assertEqual(self.TYPE, match_dict[key].type)
            self.assertEqual(key, match_dict[key].resource_id)


class TestDiskPollsters(TestBaseDiskIO):

    DISKS = [
        (virt_inspector.Disk(device='vda1'),
         virt_inspector.DiskStats(read_bytes=1, read_requests=2,
                                  write_bytes=3, write_requests=4,
                                  errors=-1)),
        (virt_inspector.Disk(device='vda2'),
         virt_inspector.DiskStats(read_bytes=2, read_requests=3,
                                  write_bytes=5, write_requests=7,
                                  errors=-1)),
    ]
    CACHE_KEY = "CACHE_KEY_DISK"

    def setUp(self):
        super(TestDiskPollsters, self).setUp()
        self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS)

    def test_disk_read_requests(self):
        self._check_aggregate_samples(disk.ReadRequestsPollster,
                                      'disk.read.requests', 5,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_read_bytes(self):
self._check_aggregate_samples(disk.ReadBytesPollster, 'disk.read.bytes', 3, expected_device=['vda1', 'vda2']) def test_disk_write_requests(self): self._check_aggregate_samples(disk.WriteRequestsPollster, 'disk.write.requests', 11, expected_device=['vda1', 'vda2']) def test_disk_write_bytes(self): self._check_aggregate_samples(disk.WriteBytesPollster, 'disk.write.bytes', 8, expected_device=['vda1', 'vda2']) def test_per_disk_read_requests(self): self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 3, 'vda2') def test_per_disk_write_requests(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 4, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 7, 'vda2') def test_per_disk_read_bytes(self): self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 1, 'vda1') self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 2, 'vda2') def test_per_disk_write_bytes(self): self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 5, 'vda2') class TestDiskRatePollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskRateStats(1024, 300, 5120, 700)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskRateStats(2048, 400, 6144, 800)) ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_RATE" def setUp(self): super(TestDiskRatePollsters, self).setUp() self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) def test_disk_read_bytes_rate(self): self._check_aggregate_samples(disk.ReadBytesRatePollster, 'disk.read.bytes.rate', 3072, expected_device=['disk1', 'disk2']) def test_disk_read_requests_rate(self): self._check_aggregate_samples(disk.ReadRequestsRatePollster, 'disk.read.requests.rate', 700, expected_device=['disk1', 'disk2']) def test_disk_write_bytes_rate(self): self._check_aggregate_samples(disk.WriteBytesRatePollster, 'disk.write.bytes.rate', 11264, expected_device=['disk1', 'disk2']) def test_disk_write_requests_rate(self): self._check_aggregate_samples(disk.WriteRequestsRatePollster, 'disk.write.requests.rate', 1500, expected_device=['disk1', 'disk2']) def test_per_disk_read_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 1024, 'disk1') self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 2048, 'disk2') def test_per_disk_read_requests_rate(self): self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 300, 'disk1') self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 400, 'disk2') def test_per_disk_write_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 5120, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 6144, 'disk2') def test_per_disk_write_requests_rate(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 'disk.device.write.requests.rate', 700, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 
'disk.device.write.requests.rate', 800, 'disk2') class TestDiskLatencyPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskLatencyStats(1000)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskLatencyStats(2000)) ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_LATENCY" def setUp(self): super(TestDiskLatencyPollsters, self).setUp() self.inspector.inspect_disk_latency = mock.Mock( return_value=self.DISKS) def test_disk_latency(self): self._check_aggregate_samples(disk.DiskLatencyPollster, 'disk.latency', 3) def test_per_device_latency(self): self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 1, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 2, 'disk2') class TestDiskIOPSPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskIOPSStats(10)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskIOPSStats(20)), ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_IOPS" def setUp(self): super(TestDiskIOPSPollsters, self).setUp() self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) def test_disk_iops(self): self._check_aggregate_samples(disk.DiskIOPSPollster, 'disk.iops', 30) def test_per_device_iops(self): self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 10, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 20, 'disk2') class TestDiskInfoPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='vda1'), virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)), (virt_inspector.Disk(device='vda2'), virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)), ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_INFO" def setUp(self): super(TestDiskInfoPollsters, self).setUp() self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) def test_disk_capacity(self): self._check_aggregate_samples(disk.CapacityPollster, 'disk.capacity', 7, expected_device=['vda1', 'vda2']) def test_disk_allocation(self): self._check_aggregate_samples(disk.AllocationPollster, 'disk.allocation', 5, expected_device=['vda1', 'vda2']) def test_disk_physical(self): self._check_aggregate_samples(disk.PhysicalPollster, 'disk.usage', 3, expected_device=['vda1', 'vda2']) def test_per_disk_capacity(self): self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 4, 'vda2') def test_per_disk_allocation(self): self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 3, 'vda2') def test_per_disk_physical(self): self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 1, 'vda1') self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 2, 'vda2') ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py0000664000567000056710000001173513072744706032072 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""Tests for the compute pollsters."""

import mock
from oslotest import base
import six

from ceilometer.agent import manager
from ceilometer.compute.pollsters import util


class FauxInstance(object):
    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default):
        try:
            return getattr(self, key)
        except AttributeError:
            return default


class TestLocationMetadata(base.BaseTestCase):

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        self.manager = manager.AgentManager()
        super(TestLocationMetadata, self).setUp()

        # Mimics an instance returned from a nova API call.
        self.INSTANCE_PROPERTIES = {'name': 'display name',
                                    'id': ('234cbe81-4e09-4f64-9b2a-'
                                           '714f6b9046e3'),
                                    'OS-EXT-SRV-ATTR:instance_name':
                                        'instance-000001',
                                    'OS-EXT-AZ:availability_zone':
                                        'foo-zone',
                                    'reservation_id': 'reservation id',
                                    'architecture': 'x86_64',
                                    'kernel_id': 'kernel id',
                                    'os_type': 'linux',
                                    'ramdisk_id': 'ramdisk id',
                                    'status': 'active',
                                    'ephemeral_gb': 0,
                                    'root_gb': 20,
                                    'disk_gb': 20,
                                    'image': {'id': 1,
                                              'links': [{"rel": "bookmark",
                                                         'href': 2}]},
                                    'hostId': '1234-5678',
                                    'OS-EXT-SRV-ATTR:host': 'host-test',
                                    'flavor': {'name': 'm1.tiny',
                                               'id': 1,
                                               'disk': 20,
                                               'ram': 512,
                                               'vcpus': 2,
                                               'ephemeral': 0},
                                    'metadata': {'metering.autoscale.group':
                                                 'X' * 512,
                                                 'metering.ephemeral_gb': 42}}
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)

    def test_metadata(self):
        md = util._get_metadata_from_object(self.instance)
        for prop, value in six.iteritems(self.INSTANCE_PROPERTIES):
            if prop != "metadata":
                # Special cases
                if prop == 'name':
                    prop = 'display_name'
                elif prop == 'hostId':
                    prop = "host"
                elif prop == 'OS-EXT-SRV-ATTR:host':
                    prop = "instance_host"
                elif prop == 'OS-EXT-SRV-ATTR:instance_name':
                    prop = 'name'
                elif prop == "id":
                    prop = "instance_id"
                self.assertEqual(value, md[prop])
        user_metadata = md['user_metadata']
        expected = self.INSTANCE_PROPERTIES[
            'metadata']['metering.autoscale.group'][:256]
        self.assertEqual(expected, user_metadata['autoscale_group'])
        self.assertEqual(1, len(user_metadata))

    def test_metadata_empty_image(self):
        self.INSTANCE_PROPERTIES['image'] = None
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
        md = util._get_metadata_from_object(self.instance)
        self.assertIsNone(md['image'])
        self.assertIsNone(md['image_ref'])
        self.assertIsNone(md['image_ref_url'])

    def test_metadata_image_through_conductor(self):
        # There are no links here, so image_ref_url should default to None.
        self.INSTANCE_PROPERTIES['image'] = {'id': 1}
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
        md = util._get_metadata_from_object(self.instance)
        self.assertEqual(1, md['image_ref'])
        self.assertIsNone(md['image_ref_url'])
ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_cpu.py0000664000567000056710000000750713072744706027203 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import cpu from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.tests.unit.compute.pollsters import base class TestCPUPollster(base.TestPollsterBase): def setUp(self): super(TestCPUPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.CPUStats(time=1 * (10 ** 6), number=2), virt_inspector.CPUStats(time=3 * (10 ** 6), number=2), # cpu_time resets on instance restart virt_inspector.CPUStats(time=2 * (10 ** 6), number=2), )) def inspect_cpus(name): return next(next_value) self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus) mgr = manager.AgentManager() pollster = cpu.CPUPollster() def _verify_cpu_metering(expected_time): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu']), set([s.name for s in samples])) self.assertEqual(expected_time, samples[0].volume) self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) # ensure elapsed time between polling cycles is non-zero time.sleep(0.001) _verify_cpu_metering(1 * (10 ** 6)) _verify_cpu_metering(3 * (10 ** 6)) _verify_cpu_metering(2 * (10 ** 6)) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples_no_caching(self): cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2) self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats) mgr = manager.AgentManager() pollster = cpu.CPUPollster() cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(10 ** 6, samples[0].volume) self.assertEqual(0, len(cache)) class TestCPUUtilPollster(base.TestPollsterBase): def setUp(self): super(TestCPUUtilPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.CPUUtilStats(util=40), virt_inspector.CPUUtilStats(util=60), )) def inspect_cpu_util(name, duration): return next(next_value) self.inspector.inspect_cpu_util = (mock. Mock(side_effect=inspect_cpu_util)) mgr = manager.AgentManager() pollster = cpu.CPUUtilPollster() def _verify_cpu_util_metering(expected_util): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu_util']), set([s.name for s in samples])) self.assertEqual(expected_util, samples[0].volume) _verify_cpu_util_metering(40) _verify_cpu_util_metering(60) ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_net.py0000664000567000056710000003157713072744706027216 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import net from ceilometer.compute.virt import inspector as virt_inspector from ceilometer import sample from ceilometer.tests.unit.compute.pollsters import base class FauxInstance(object): def __init__(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): return getattr(self, key, default) class TestNetPollster(base.TestPollsterBase): def setUp(self): super(TestNetPollster, self).setUp() self.vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, tx_bytes=3, tx_packets=4) self.vnic1 = virt_inspector.Interface( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2')) stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6, tx_bytes=7, tx_packets=8) self.vnic2 = virt_inspector.Interface( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3')) stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10, tx_bytes=11, tx_packets=12) vnics = [ (self.vnic0, stats0), (self.vnic1, stats1), (self.vnic2, stats2), ] self.inspector.inspect_vnics = mock.Mock(return_value=vnics) self.INSTANCE_PROPERTIES = {'name': 'display name', 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'id': 'instance id', 'user_id': 'user id', 'tenant_id': 'tenant id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'disk': 20, 'ram': 512, 'name': 'tiny', 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.foobar': 42}} self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager() pollster = factory() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(len(match), 1, 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('cumulative', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def 
test_incoming_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesPollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 5, self.vnic1.fref), ('192.168.0.4', 9, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesPollster, [('10.0.0.2', 3, self.vnic0.fref), ('192.168.0.3', 7, self.vnic1.fref), ('192.168.0.4', 11, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_incoming_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingPacketsPollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 6, self.vnic1.fref), ('192.168.0.4', 10, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingPacketsPollster, [('10.0.0.2', 4, self.vnic0.fref), ('192.168.0.3', 8, self.vnic1.fref), ('192.168.0.4', 12, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_metadata(self): factory = net.OutgoingBytesPollster pollster = factory() sm = pollster.make_vnic_sample(self.faux_instance, name='network.outgoing.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=100, vnic_data=self.vnic0) user_metadata = sm.resource_metadata['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, user_metadata['autoscale_group']) self.assertEqual(2, len(user_metadata)) class TestNetPollsterCache(base.TestPollsterBase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples_cache(self, factory): vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, tx_bytes=3, tx_packets=4) vnics = [(vnic0, stats0)] mgr = manager.AgentManager() pollster = factory() cache = { pollster.CACHE_KEY_VNIC: { self.instance.id: vnics, }, } samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) def test_incoming_bytes(self): self._check_get_samples_cache(net.IncomingBytesPollster) def test_outgoing_bytes(self): self._check_get_samples_cache(net.OutgoingBytesPollster) def test_incoming_packets(self): self._check_get_samples_cache(net.IncomingPacketsPollster) def test_outgoing_packets(self): self._check_get_samples_cache(net.OutgoingPacketsPollster) class TestNetRatesPollster(base.TestPollsterBase): def setUp(self): super(TestNetRatesPollster, self).setUp() self.vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1, tx_bytes_rate=2) self.vnic1 = virt_inspector.Interface( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2')) stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3, tx_bytes_rate=4) self.vnic2 = virt_inspector.Interface( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', 
parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3')) stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5, tx_bytes_rate=6) vnics = [ (self.vnic0, stats0), (self.vnic1, stats1), (self.vnic2, stats2), ] self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager() pollster = factory() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(1, len(match), 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('gauge', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def test_incoming_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesRatePollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 3, self.vnic1.fref), ('192.168.0.4', 5, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesRatePollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 4, self.vnic1.fref), ('192.168.0.4', 6, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/__init__.py0000664000567000056710000000000013072744703027076 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/pollsters/test_instance.py0000664000567000056710000000724013072744706030222 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
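# --- Annotator's sketch; not part of the ceilometer source. The network
# pollster tests above pin down how a vNIC sample's resource_id is chosen:
# a NIC that carries a filter reference (fref) is identified by that fref
# alone, while a NIC without one (vnic2 above) falls back to
# "<instance name>-<instance id>-<vnic name>". A minimal illustration of
# that convention; the helper name is hypothetical and only the fields the
# tests exercise are assumed.
def _vnic_resource_id_sketch(instance_name, instance_id, vnic_name, fref):
    """Return the resource id the net pollsters are expected to emit."""
    if fref is not None:
        return fref
    instance_name_id = "%s-%s" % (instance_name, instance_id)
    return "%s-%s" % (instance_name_id, vnic_name)

# For example, a fref-less NIC named 'vnet2' gives
# _vnic_resource_id_sketch(name, uuid, 'vnet2', None) == '<name>-<uuid>-vnet2',
# matching the expected resource ids asserted in test_incoming_bytes above.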
import mock from oslo_config import fixture as fixture_config from ceilometer.agent import manager from ceilometer.compute.pollsters import instance as pollsters_instance from ceilometer.tests.unit.compute.pollsters import base class TestInstancePollster(base.TestPollsterBase): def setUp(self): super(TestInstancePollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples_instance(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('instance', samples[0].name) self.assertEqual(1, samples[0].resource_metadata['vcpus']) self.assertEqual(512, samples[0].resource_metadata['memory_mb']) self.assertEqual(20, samples[0].resource_metadata['disk_gb']) self.assertEqual(20, samples[0].resource_metadata['root_gb']) self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) self.assertEqual('active', samples[0].resource_metadata['status']) self.assertEqual('active', samples[0].resource_metadata['state']) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_reserved_metadata_with_keys(self): self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('reserved_metadata_keys', ['fqdn']) mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'fqdn': 'vm_fqdn', 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_reserved_metadata_with_namespace(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('reserved_metadata_namespace', []) mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertNotIn('user_metadata', samples[0].resource_metadata) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_flavor_name_as_metadata_instance_type(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('m1.small', samples[0].resource_metadata['instance_type']) ceilometer-6.1.5/ceilometer/tests/unit/compute/test_discovery.py0000664000567000056710000001001113072744706026364 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
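# --- Annotator's sketch; not part of the ceilometer source. The discovery
# tests below fix three behaviours of InstanceDiscovery.discover(): the
# first call lists every instance on the configured host (changes-since is
# None), later calls pass the previous run's timestamp as an ISO 8601
# changes-since filter, and a call made before resource_update_interval
# seconds have elapsed skips the nova request entirely. A minimal sketch of
# that throttling under those assumptions, given a nova-client-like object
# exposing instance_get_all_by_host(host, since); the real discoverer also
# records the poll time, which is omitted here.
def _discover_sketch(client, host, last_run, now, update_interval):
    """Return instances changed since last_run, honouring the interval."""
    if (last_run is not None and
            (now - last_run).total_seconds() < update_interval):
        return []  # too soon to ask nova again
    since = last_run.isoformat() if last_run is not None else None
    return client.instance_get_all_by_host(host, since)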
import datetime import iso8601 import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch from ceilometer.compute import discovery import ceilometer.tests.base as base class TestDiscovery(base.BaseTestCase): def setUp(self): super(TestDiscovery, self).setUp() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.client = mock.MagicMock() self.client.instance_get_all_by_host.return_value = [self.instance] patch_client = mockpatch.Patch('ceilometer.nova_client.Client', return_value=self.client) self.useFixture(patch_client) self.utc_now = mock.MagicMock( return_value=datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC)) patch_timeutils = mockpatch.Patch('oslo_utils.timeutils.utcnow', self.utc_now) self.useFixture(patch_timeutils) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('host', 'test') def test_normal_discovery(self): dsc = discovery.InstanceDiscovery() resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( 'test', None) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_with( self.CONF.host, "2016-01-01T00:00:00+00:00") def test_discovery_with_resource_update_interval(self): self.CONF.set_override("resource_update_interval", 600, group="compute") dsc = discovery.InstanceDiscovery() dsc.last_run = datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=5, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) self.client.instance_get_all_by_host.assert_not_called() self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( self.CONF.host, "2016-01-01T00:00:00+00:00") ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/0000775000567000056710000000000013072745164023736 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/libvirt/0000775000567000056710000000000013072745164025411 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py0000664000567000056710000004143013072744706031033 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for libvirt inspector. """ try: import contextlib2 as contextlib # for Python < 3.3 except ImportError: import contextlib import fixtures import mock from oslo_utils import units from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector class TestLibvirtInspection(base.BaseTestCase): class fakeLibvirtError(Exception): pass def setUp(self): super(TestLibvirtInspection, self).setUp() class VMInstance(object): id = 'ff58e738-12f4-4c58-acde-77617b68da56' name = 'instance-00000001' self.instance = VMInstance self.inspector = libvirt_inspector.LibvirtInspector() self.inspector.connection = mock.Mock() libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError self.domain = mock.Mock() self.addCleanup(mock.patch.stopall) def test_inspect_cpus(self): with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(self.inspector.connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) cpu_info = self.inspector.inspect_cpus(self.instance) self.assertEqual(2, cpu_info.number) self.assertEqual(999999, cpu_info.time) def test_inspect_cpus_with_domain_shutoff(self): connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999)): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_cpus, self.instance) def test_inspect_vnics(self): dom_xml = """
""" interface_stats = { 'vnet0': (1, 2, 0, 0, 3, 4, 0, 0), 'vnet1': (5, 6, 0, 0, 7, 8, 0, 0), 'vnet2': (9, 10, 0, 0, 11, 12, 0, 0), } interfaceStats = interface_stats.__getitem__ connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'interfaceStats', side_effect=interfaceStats)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) interfaces = list(self.inspector.inspect_vnics(self.instance)) self.assertEqual(3, len(interfaces)) vnic0, info0 = interfaces[0] self.assertEqual('vnet0', vnic0.name) self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) self.assertEqual(1, info0.rx_bytes) self.assertEqual(2, info0.rx_packets) self.assertEqual(3, info0.tx_bytes) self.assertEqual(4, info0.tx_packets) vnic1, info1 = interfaces[1] self.assertEqual('vnet1', vnic1.name) self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) self.assertEqual(5, info1.rx_bytes) self.assertEqual(6, info1.rx_packets) self.assertEqual(7, info1.tx_bytes) self.assertEqual(8, info1.tx_packets) vnic2, info2 = interfaces[2] self.assertEqual('vnet2', vnic2.name) self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) self.assertIsNone(vnic2.fref) self.assertEqual(dict(), vnic2.parameters) self.assertEqual(9, info2.rx_bytes) self.assertEqual(10, info2.rx_packets) self.assertEqual(11, info2.tx_bytes) self.assertEqual(12, info2.tx_packets) def test_inspect_vnics_with_domain_shutoff(self): connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999))) inspect = self.inspector.inspect_vnics self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) def test_inspect_disks(self): dom_xml = """
""" with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(self.inspector.connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'blockStats', return_value=(1, 2, 3, 4, -1))) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) disks = list(self.inspector.inspect_disks(self.instance)) self.assertEqual(1, len(disks)) disk0, info0 = disks[0] self.assertEqual('vda', disk0.device) self.assertEqual(1, info0.read_requests) self.assertEqual(2, info0.read_bytes) self.assertEqual(3, info0.write_requests) self.assertEqual(4, info0.write_bytes) def test_inspect_disks_with_domain_shutoff(self): connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999))) inspect = self.inspector.inspect_disks self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) def test_inspect_memory_usage(self): fake_memory_stats = {'available': 51200, 'unused': 25600} connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(0, 0, 51200, 2, 999999)): with mock.patch.object(self.domain, 'memoryStats', return_value=fake_memory_stats): memory = self.inspector.inspect_memory_usage( self.instance) self.assertEqual(25600 / units.Ki, memory.usage) def test_inspect_disk_info(self): dom_xml = """
""" with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(self.inspector.connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'blockInfo', return_value=(1, 2, 3, -1))) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) disks = list(self.inspector.inspect_disk_info(self.instance)) self.assertEqual(1, len(disks)) disk0, info0 = disks[0] self.assertEqual('vda', disk0.device) self.assertEqual(1, info0.capacity) self.assertEqual(2, info0.allocation) self.assertEqual(3, info0.physical) def test_inspect_memory_usage_with_domain_shutoff(self): connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999)): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_memory_usage, self.instance) def test_inspect_memory_usage_with_empty_stats(self): connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(0, 0, 51200, 2, 999999)): with mock.patch.object(self.domain, 'memoryStats', return_value={}): self.assertRaises(virt_inspector.NoDataException, self.inspector.inspect_memory_usage, self.instance) class TestLibvirtInspectionWithError(base.BaseTestCase): class fakeLibvirtError(Exception): pass def setUp(self): super(TestLibvirtInspectionWithError, self).setUp() self.inspector = libvirt_inspector.LibvirtInspector() self.useFixture(fixtures.MonkeyPatch( 'ceilometer.compute.virt.libvirt.inspector.' 'LibvirtInspector._get_connection', self._dummy_get_connection)) libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError @staticmethod def _dummy_get_connection(*args, **kwargs): raise Exception('dummy') def test_inspect_unknown_error(self): self.assertRaises(virt_inspector.InspectorException, self.inspector.inspect_cpus, 'foo') ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/libvirt/__init__.py0000664000567000056710000000000013072744703027506 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/xenapi/0000775000567000056710000000000013072745164025222 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py0000664000567000056710000001522013072744706030642 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for xenapi inspector. 
""" import mock from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector class TestSwapXapiHost(base.BaseTestCase): def test_swapping(self): self.assertEqual( "http://otherserver:8765/somepath", xenapi_inspector.swap_xapi_host( "http://someserver:8765/somepath", 'otherserver')) def test_no_port(self): self.assertEqual( "http://otherserver/somepath", xenapi_inspector.swap_xapi_host( "http://someserver/somepath", 'otherserver')) def test_no_path(self): self.assertEqual( "http://otherserver", xenapi_inspector.swap_xapi_host( "http://someserver", 'otherserver')) def test_same_hostname_path(self): self.assertEqual( "http://other:80/some", xenapi_inspector.swap_xapi_host( "http://some:80/some", 'other')) class TestXenapiInspection(base.BaseTestCase): def setUp(self): api_session = mock.Mock() xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) self.inspector = xenapi_inspector.XenapiInspector() super(TestXenapiInspection, self).setUp() def test_inspect_cpu_util(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} fake_stat = virt_inspector.CPUUtilStats(util=40) def fake_xenapi_request(method, args): metrics_rec = { 'memory_actual': '536870912', 'VCPUs_number': '1', 'VCPUs_utilisation': {'0': 0.4, } } if method == 'VM.get_by_name_label': return ['vm_ref'] elif method == 'VM.get_metrics': return 'metrics_ref' elif method == 'VM_metrics.get_record': return metrics_rec else: return None session = self.inspector.session with mock.patch.object(session, 'xenapi_request', side_effect=fake_xenapi_request): cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance) self.assertEqual(fake_stat, cpu_util_stat) def test_inspect_memory_usage(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} fake_stat = virt_inspector.MemoryUsageStats(usage=128) def fake_xenapi_request(method, args): metrics_rec = { 'memory_actual': '134217728', } if method == 'VM.get_by_name_label': return ['vm_ref'] elif method == 'VM.get_metrics': return 'metrics_ref' elif method == 'VM_metrics.get_record': return metrics_rec else: return None session = self.inspector.session with mock.patch.object(session, 'xenapi_request', side_effect=fake_xenapi_request): memory_stat = self.inspector.inspect_memory_usage(fake_instance) self.assertEqual(fake_stat, memory_stat) def test_inspect_vnic_rates(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} def fake_xenapi_request(method, args): vif_rec = { 'metrics': 'vif_metrics_ref', 'uuid': 'vif_uuid', 'MAC': 'vif_mac', } vif_metrics_rec = { 'io_read_kbs': '1', 'io_write_kbs': '2', } if method == 'VM.get_by_name_label': return ['vm_ref'] elif method == 'VM.get_VIFs': return ['vif_ref'] elif method == 'VIF.get_record': return vif_rec elif method == 'VIF.get_metrics': return 'vif_metrics_ref' elif method == 'VIF_metrics.get_record': return vif_metrics_rec else: return None session = self.inspector.session with mock.patch.object(session, 'xenapi_request', side_effect=fake_xenapi_request): interfaces = list(self.inspector.inspect_vnic_rates(fake_instance)) self.assertEqual(1, len(interfaces)) vnic0, info0 = interfaces[0] self.assertEqual('vif_uuid', vnic0.name) self.assertEqual('vif_mac', vnic0.mac) self.assertEqual(1024, info0.rx_bytes_rate) self.assertEqual(2048, info0.tx_bytes_rate) def test_inspect_disk_rates(self): 
fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} def fake_xenapi_request(method, args): vbd_rec = { 'device': 'xvdd' } vbd_metrics_rec = { 'io_read_kbs': '1', 'io_write_kbs': '2' } if method == 'VM.get_by_name_label': return ['vm_ref'] elif method == 'VM.get_VBDs': return ['vbd_ref'] elif method == 'VBD.get_record': return vbd_rec elif method == 'VBD.get_metrics': return 'vbd_metrics_ref' elif method == 'VBD_metrics.get_record': return vbd_metrics_rec else: return None session = self.inspector.session with mock.patch.object(session, 'xenapi_request', side_effect=fake_xenapi_request): disks = list(self.inspector.inspect_disk_rates(fake_instance)) self.assertEqual(1, len(disks)) disk0, info0 = disks[0] self.assertEqual('xvdd', disk0.device) self.assertEqual(1024, info0.read_bytes_rate) self.assertEqual(2048, info0.write_bytes_rate) ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/xenapi/__init__.py0000664000567000056710000000000013072744703027317 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/vmware/0000775000567000056710000000000013072745164025237 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py0000664000567000056710000001535713072744706032603 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
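# --- Annotator's sketch; not part of the ceilometer source. The XenAPI
# inspector tests above encode a unit conversion: VIF_metrics and
# VBD_metrics records report I/O rates as KiB/s strings ('io_read_kbs',
# 'io_write_kbs'), while the inspector's rate stats carry bytes per second.
# A one-line helper (name is hypothetical) showing the conversion the
# assertions rely on:
def _kbs_to_bytes_per_sec(io_kbs):
    """Convert a XenAPI '*_kbs' metric string (KiB/s) to bytes/s."""
    return float(io_kbs) * 1024

# For example, _kbs_to_bytes_per_sec('2') == 2048.0, matching the expected
# tx_bytes_rate and write_bytes_rate values in the tests above.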
import mock from oslo_vmware import api from oslotest import base from ceilometer.compute.virt.vmware import vsphere_operations class VsphereOperationsTest(base.BaseTestCase): def setUp(self): api_session = api.VMwareAPISession("test_server", "test_user", "test_password", 0, None, create_session=False) api_session._vim = mock.MagicMock() self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, 1000) super(VsphereOperationsTest, self).setUp() def test_get_vm_moid(self): vm1_moid = "vm-1" vm2_moid = "vm-2" vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" def construct_mock_vm_object(vm_moid, vm_instance): vm_object = mock.MagicMock() vm_object.obj.value = vm_moid vm_object.propSet[0].val = vm_instance return vm_object def retrieve_props_side_effect(pc, specSet, options): # assert inputs self.assertEqual(self._vsphere_ops._max_objects, options.maxObjects) self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, specSet[0].pathSet[0]) # mock return result vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) result = mock.MagicMock() result.objects.__iter__.return_value = [vm1, vm2] return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect vim_mock.ContinueRetrievePropertiesEx.return_value = None vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance) self.assertEqual(vm1_moid, vm_moid) vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance) self.assertEqual(vm2_moid, vm_moid) def test_query_vm_property(self): vm_moid = "vm-21" vm_property_name = "runtime.powerState" vm_property_val = "poweredON" def retrieve_props_side_effect(pc, specSet, options): # assert inputs self.assertEqual(vm_moid, specSet[0].obj.value) self.assertEqual(vm_property_name, specSet[0].pathSet[0]) # mock return result result = mock.MagicMock() result.objects[0].propSet[0].val = vm_property_val return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect actual_val = self._vsphere_ops.query_vm_property(vm_moid, vm_property_name) self.assertEqual(vm_property_val, actual_val) def test_get_perf_counter_id(self): def construct_mock_counter_info(group_name, counter_name, rollup_type, counter_id): counter_info = mock.MagicMock() counter_info.groupInfo.key = group_name counter_info.nameInfo.key = counter_name counter_info.rollupType = rollup_type counter_info.key = counter_id return counter_info def retrieve_props_side_effect(pc, specSet, options): # assert inputs self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, specSet[0].pathSet[0]) # mock return result counter_info1 = construct_mock_counter_info("a", "b", "c", 1) counter_info2 = construct_mock_counter_info("x", "y", "z", 2) result = mock.MagicMock() (result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. 
return_value) = [counter_info1, counter_info2] return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") self.assertEqual(1, counter_id) counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") self.assertEqual(2, counter_id) def test_query_vm_stats(self): vm_moid = "vm-21" device1 = "device-1" device2 = "device-2" device3 = "device-3" counter_id = 5 def construct_mock_metric_series(device_name, stat_values): metric_series = mock.MagicMock() metric_series.value = stat_values metric_series.id.instance = device_name return metric_series def vim_query_perf_side_effect(perf_manager, querySpec): # assert inputs self.assertEqual(vm_moid, querySpec[0].entity.value) self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, querySpec[0].intervalId) # mock return result perf_stats = mock.MagicMock() perf_stats[0].sampleInfo = ["s1", "s2", "s3"] perf_stats[0].value.__iter__.return_value = [ construct_mock_metric_series(None, [111, 222, 333]), construct_mock_metric_series(device1, [100, 200, 300]), construct_mock_metric_series(device2, [10, 20, 30]), construct_mock_metric_series(device3, [1, 2, 3]) ] return perf_stats vim_mock = self._vsphere_ops._api_session._vim vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect ops = self._vsphere_ops # test aggregate stat stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60) self.assertEqual(222, stat_val) # test per-device(non-aggregate) stats expected_device_stats = { device1: 200, device2: 20, device3: 2 } stats = ops.query_vm_device_stats(vm_moid, counter_id, 60) self.assertEqual(expected_device_stats, stats) ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py0000664000567000056710000001465713072744706030674 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for VMware vSphere inspector. 
""" import mock from oslo_vmware import api from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import inspector as vsphere_inspector class TestVsphereInspection(base.BaseTestCase): def setUp(self): api_session = api.VMwareAPISession("test_server", "test_user", "test_password", 0, None, create_session=False, port=7443) vsphere_inspector.get_api_session = mock.Mock( return_value=api_session) self._inspector = vsphere_inspector.VsphereInspector() self._inspector._ops = mock.MagicMock() super(TestVsphereInspection, self).setUp() def test_inspect_memory_usage(self): fake_instance_moid = 'fake_instance_moid' fake_instance_id = 'fake_instance_id' fake_perf_counter_id = 'fake_perf_counter_id' fake_memory_value = 1024.0 fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) def construct_mock_instance_object(fake_instance_id): instance_object = mock.MagicMock() instance_object.id = fake_instance_id return instance_object fake_instance = construct_mock_instance_object(fake_instance_id) self._inspector._ops.get_vm_moid.return_value = fake_instance_moid (self._inspector._ops. get_perf_counter_id.return_value) = fake_perf_counter_id (self._inspector._ops.query_vm_aggregate_stats. return_value) = fake_memory_value memory_stat = self._inspector.inspect_memory_usage(fake_instance) self.assertEqual(fake_stat, memory_stat) def test_inspect_cpu_util(self): fake_instance_moid = 'fake_instance_moid' fake_instance_id = 'fake_instance_id' fake_perf_counter_id = 'fake_perf_counter_id' fake_cpu_util_value = 60 fake_stat = virt_inspector.CPUUtilStats(util=60) def construct_mock_instance_object(fake_instance_id): instance_object = mock.MagicMock() instance_object.id = fake_instance_id return instance_object fake_instance = construct_mock_instance_object(fake_instance_id) self._inspector._ops.get_vm_moid.return_value = fake_instance_moid (self._inspector._ops.get_perf_counter_id. return_value) = fake_perf_counter_id (self._inspector._ops.query_vm_aggregate_stats. 
return_value) = fake_cpu_util_value * 100 cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) self.assertEqual(fake_stat, cpu_util_stat) def test_inspect_vnic_rates(self): # construct test data test_vm_moid = "vm-21" vnic1 = "vnic-1" vnic2 = "vnic-2" counter_name_to_id_map = { vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 } counter_id_to_stats_map = { 1: {vnic1: 1, vnic2: 3}, 2: {vnic1: 2, vnic2: 4}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_moid, counter_id, duration): # assert inputs self.assertEqual(test_vm_moid, vm_moid) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_vm_moid.return_value = test_vm_moid ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = self._inspector.inspect_vnic_rates(mock.MagicMock()) # validate result expected_stats = { vnic1: virt_inspector.InterfaceRateStats(1024, 2048), vnic2: virt_inspector.InterfaceRateStats(3072, 4096) } for vnic, rates_info in result: self.assertEqual(expected_stats[vnic.name], rates_info) def test_inspect_disk_rates(self): # construct test data test_vm_moid = "vm-21" disk1 = "disk-1" disk2 = "disk-2" counter_name_to_id_map = { vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 } counter_id_to_stats_map = { 1: {disk1: 1, disk2: 2}, 2: {disk1: 300, disk2: 400}, 3: {disk1: 5, disk2: 6}, 4: {disk1: 700}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_moid, counter_id, duration): # assert inputs self.assertEqual(test_vm_moid, vm_moid) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_vm_moid.return_value = test_vm_moid ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = self._inspector.inspect_disk_rates(mock.MagicMock()) # validate result expected_stats = { disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) } actual_stats = dict((disk.device, rates) for (disk, rates) in result) self.assertEqual(expected_stats, actual_stats) ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/vmware/__init__.py0000664000567000056710000000000013072744703027334 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/hyperv/0000775000567000056710000000000013072745164025253 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py0000664000567000056710000001735513072744706030706 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Hyper-V inspector. """ import sys import mock from os_win import exceptions as os_win_exc from oslo_utils import units from oslotest import base from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector from ceilometer.compute.virt import inspector as virt_inspector class TestHyperVInspection(base.BaseTestCase): @mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock()) @mock.patch.object(hyperv_inspector.HyperVInspector, '_compute_host_max_cpu_clock') def setUp(self, mock_compute_host_cpu_clock): self._inspector = hyperv_inspector.HyperVInspector() self._inspector._utils = mock.MagicMock() super(TestHyperVInspection, self).setUp() def test_converted_exception(self): self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.OSWinException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_cpus, mock.sentinel.instance) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.HyperVException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_cpus, mock.sentinel.instance) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.NotFound(resource='foofoo')) self.assertRaises(virt_inspector.InstanceNotFoundException, self._inspector.inspect_cpus, mock.sentinel.instance) def test_assert_original_traceback_maintained(self): def bar(self): foo = "foofoo" raise os_win_exc.NotFound(resource=foo) self._inspector._utils.get_cpu_metrics.side_effect = bar try: self._inspector.inspect_cpus(mock.sentinel.instance) self.fail("Test expected exception, but it was not raised.") except virt_inspector.InstanceNotFoundException: # exception has been raised as expected. _, _, trace = sys.exc_info() while trace.tb_next: # iterate until the original exception source, bar. trace = trace.tb_next # original frame will contain the 'foo' variable. 
self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) @mock.patch.object(hyperv_inspector, 'utilsfactory') def test_compute_host_max_cpu_clock(self, mock_utilsfactory): mock_cpu = {'MaxClockSpeed': 1000} hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info hostutils.return_value = [mock_cpu, mock_cpu] cpu_clock = self._inspector._compute_host_max_cpu_clock() self.assertEqual(2000.0, cpu_clock) def test_inspect_cpus(self): fake_instance_name = 'fake_instance_name' fake_cpu_clock_used = 2000 fake_cpu_count = 3000 fake_uptime = 4000 self._inspector._host_max_cpu_clock = 4000.0 fake_cpu_percent_used = (fake_cpu_clock_used / self._inspector._host_max_cpu_clock) fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * 1000) self._inspector._utils.get_cpu_metrics.return_value = ( fake_cpu_clock_used, fake_cpu_count, fake_uptime) cpu_stats = self._inspector.inspect_cpus(fake_instance_name) self.assertEqual(fake_cpu_count, cpu_stats.number) self.assertEqual(fake_cpu_time, cpu_stats.time) def test_inspect_memory_usage(self): fake_usage = self._inspector._utils.get_memory_metrics.return_value usage = self._inspector.inspect_memory_usage( mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION) self.assertEqual(fake_usage, usage.usage) def test_inspect_vnics(self): fake_instance_name = 'fake_instance_name' fake_rx_mb = 1000 fake_tx_mb = 2000 fake_element_name = 'fake_element_name' fake_address = 'fake_address' self._inspector._utils.get_vnic_metrics.return_value = [{ 'rx_mb': fake_rx_mb, 'tx_mb': fake_tx_mb, 'element_name': fake_element_name, 'address': fake_address}] inspected_vnics = list(self._inspector.inspect_vnics( fake_instance_name)) self.assertEqual(1, len(inspected_vnics)) self.assertEqual(2, len(inspected_vnics[0])) inspected_vnic, inspected_stats = inspected_vnics[0] self.assertEqual(fake_element_name, inspected_vnic.name) self.assertEqual(fake_address, inspected_vnic.mac) self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) def test_inspect_disks(self): fake_instance_name = 'fake_instance_name' fake_read_mb = 1000 fake_write_mb = 2000 fake_instance_id = "fake_fake_instance_id" fake_host_resource = "fake_host_resource" self._inspector._utils.get_disk_metrics.return_value = [{ 'read_mb': fake_read_mb, 'write_mb': fake_write_mb, 'instance_id': fake_instance_id, 'host_resource': fake_host_resource}] inspected_disks = list(self._inspector.inspect_disks( fake_instance_name)) self.assertEqual(1, len(inspected_disks)) self.assertEqual(2, len(inspected_disks[0])) inspected_disk, inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_disk.device) self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes) self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) def test_inspect_disk_latency(self): fake_instance_name = mock.sentinel.INSTANCE_NAME fake_disk_latency = mock.sentinel.DISK_LATENCY fake_instance_id = mock.sentinel.INSTANCE_ID self._inspector._utils.get_disk_latency_metrics.return_value = [{ 'disk_latency': fake_disk_latency, 'instance_id': fake_instance_id}] inspected_disks = list(self._inspector.inspect_disk_latency( fake_instance_name)) self.assertEqual(1, len(inspected_disks)) self.assertEqual(2, len(inspected_disks[0])) inspected_disk, inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_disk.device) self.assertEqual(fake_disk_latency, inspected_stats.disk_latency) def 
test_inspect_disk_iops_count(self): fake_instance_name = mock.sentinel.INSTANCE_NAME fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT fake_instance_id = mock.sentinel.INSTANCE_ID self._inspector._utils.get_disk_iops_count.return_value = [{ 'iops_count': fake_disk_iops_count, 'instance_id': fake_instance_id}] inspected_disks = list(self._inspector.inspect_disk_iops( fake_instance_name)) self.assertEqual(1, len(inspected_disks)) self.assertEqual(2, len(inspected_disks[0])) inspected_disk, inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_disk.device) self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count) ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/hyperv/__init__.py0000664000567000056710000000000013072744703027350 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/virt/__init__.py0000664000567000056710000000000013072744703026033 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/notifications/0000775000567000056710000000000013072745164025623 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/notifications/__init__.py0000664000567000056710000000000013072744706027723 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/compute/notifications/test_instance.py0000664000567000056710000006541513072744706031054 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for converters for producing compute counter messages from notification events. 
""" from oslotest import base from ceilometer.compute.notifications import instance from ceilometer import sample INSTANCE_CREATE_END = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'compute.instance.create.end', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } INSTANCE_DELETE_START = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:24:14.547374', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'compute.instance.delete.start', u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', u'payload': {u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47', u'memory_mb': 512, u'state': u'active', u'state_description': u'deleting', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:24:14.824743', } INSTANCE_EXISTS = { u'_context_auth_token': None, u'_context_is_admin': True, u'_context_project_id': None, u'_context_quota_class': None, u'_context_read_deleted': u'no', 
u'_context_remote_address': None, u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T16:03:43.760204', u'_context_user_id': None, u'event_type': u'compute.instance.exists', u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', u'audit_period_ending': u'2012-05-08 16:00:00', u'bandwidth': {}, u'created_at': u'2012-05-07 22:16:18', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-07 23:01:27', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 16:03:44.122481', } INSTANCE_EXISTS_METADATA_LIST = { u'_context_auth_token': None, u'_context_is_admin': True, u'_context_project_id': None, u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': None, u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T16:03:43.760204', u'_context_user_id': None, u'event_type': u'compute.instance.exists', u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', u'audit_period_ending': u'2012-05-08 16:00:00', u'bandwidth': {}, u'created_at': u'2012-05-07 22:16:18', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-07 23:01:27', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'metadata': [], u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 16:03:44.122481', } INSTANCE_FINISH_RESIZE_END = { u'_context_roles': [u'admin'], u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', u'_context_quota_class': None, u'event_type': u'compute.instance.finish_resize.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2013-01-04 15:10:17.436974', u'_context_is_admin': True, u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', u'_context_auth_token': None, u'_context_instance_lock_checked': False, u'_context_project_id': 
u'cea4b25edb484e5392727181b7721d29', u'_context_timestamp': u'2013-01-04T15:08:39.162612', u'_context_read_deleted': u'no', u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'_context_remote_address': u'10.147.132.184', u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', u'payload': {u'state_description': u'', u'availability_zone': None, u'ephemeral_gb': 0, u'instance_type_id': 5, u'deleted_at': u'', u'fixed_ips': [{u'floating_ips': [], u'label': u'private', u'version': 4, u'meta': {}, u'address': u'10.0.0.3', u'type': u'fixed'}], u'memory_mb': 2048, u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'reservation_id': u'r-u3fvim06', u'hostname': u's1', u'state': u'resized', u'launched_at': u'2013-01-04T15:10:14.923939', u'metadata': {u'metering.server_group': u'Group_A', u'AutoScalingGroupName': u'tyky-Group_Awste7', u'metering.foo.bar': u'true'}, u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'access_ip_v6': None, u'disk_gb': 20, u'access_ip_v4': None, u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'host': u'ip-10-147-132-184.ec2.internal', u'display_name': u's1', u'image_ref_url': u'http://10.147.132.184:9292/images/' 'a130b9d9-e00e-436e-9782-836ccef06e8a', u'root_gb': 20, u'tenant_id': u'cea4b25edb484e5392727181b7721d29', u'created_at': u'2013-01-04T11:21:48.000000', u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', u'instance_type': u'm1.small', u'vcpus': 1, u'image_meta': {u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'base_image_ref': u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, u'architecture': None, u'os_type': None }, u'priority': u'INFO' } INSTANCE_RESIZE_REVERT_END = { u'_context_roles': [u'admin'], u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', u'_context_quota_class': None, u'event_type': u'compute.instance.resize.revert.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2013-01-04 15:20:32.009532', u'_context_is_admin': True, u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', u'_context_auth_token': None, u'_context_instance_lock_checked': False, u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', u'_context_timestamp': u'2013-01-04T15:19:51.018218', u'_context_read_deleted': u'no', u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'_context_remote_address': u'10.147.132.184', u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', u'payload': {u'state_description': u'resize_reverting', u'availability_zone': None, u'ephemeral_gb': 0, u'instance_type_id': 2, u'deleted_at': u'', u'reservation_id': u'r-u3fvim06', u'memory_mb': 512, u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'hostname': u's1', u'state': u'resized', u'launched_at': u'2013-01-04T15:10:14.000000', u'metadata': {u'metering.server_group': u'Group_A', u'AutoScalingGroupName': u'tyky-Group_A-wste7', u'metering.foo.bar': u'true'}, u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'access_ip_v6': None, u'disk_gb': 0, u'access_ip_v4': None, u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'host': u'ip-10-147-132-184.ec2.internal', u'display_name': u's1', u'image_ref_url': u'http://10.147.132.184:9292/images/' 'a130b9d9-e00e-436e-9782-836ccef06e8a', u'root_gb': 0, u'tenant_id': u'cea4b25edb484e5392727181b7721d29', u'created_at': u'2013-01-04T11:21:48.000000', u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', u'instance_type': u'm1.tiny', u'vcpus': 1, u'image_meta': {u'kernel_id': 
u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'base_image_ref': u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, u'architecture': None, u'os_type': None }, u'priority': u'INFO' } INSTANCE_SCHEDULED = { u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', u'_context_quota_class': None, u'event_type': u'scheduler.run_instance.scheduled', u'_context_service_catalog': [{ u'endpoints': [{ u'adminURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', u'region': u'RegionOne', u'internalURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', u'publicURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], u'endpoints_links': [], u'type': u'volume', u'name': u'cinder'}], u'_context_auth_token': u'TOK', u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', u'payload': { 'instance_id': 'fake-uuid1-1', u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, u'request_spec': { u'num_instances': 1, u'block_device_mapping': [{ u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'guest_format': None, u'boot_index': 0, u'no_device': None, u'connection_info': None, u'volume_id': None, u'volume_size': None, u'device_name': None, u'disk_bus': None, u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', u'source_type': u'image', u'device_type': u'disk', u'snapshot_id': None, u'destination_type': u'local', u'delete_on_termination': True}], u'image': { u'status': u'active', u'name': u'cirros-0.3.1-x86_64-uec', u'deleted': False, u'container_format': u'ami', u'created_at': u'2014-02-18T13:16:26.000000', u'disk_format': u'ami', u'updated_at': u'2014-02-18T13:16:27.000000', u'properties': { u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, u'min_disk': 0, u'min_ram': 0, u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', u'owner': u'2bd766a095b44486bf07cf7f666997eb', u'is_public': True, u'deleted_at': None, u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', u'size': 25165824}, u'instance_type': { u'root_gb': 1, u'name': u'm1.tiny', u'ephemeral_gb': 0, u'memory_mb': 512, u'vcpus': 1, u'extra_specs': {}, u'swap': 0, u'rxtx_factor': 1.0, u'flavorid': u'1', u'vcpu_weight': None, u'id': 2}, u'instance_properties': { u'vm_state': u'building', u'availability_zone': None, u'terminated_at': None, u'ephemeral_gb': 0, u'instance_type_id': 2, u'user_data': None, u'cleaned': False, u'vm_mode': None, u'deleted_at': None, u'reservation_id': u'r-ven5q6om', u'id': 15, u'security_groups': [{ u'deleted_at': None, u'user_id': u'0a757cd896b64b65ba3784afef564116', u'description': u'default', u'deleted': False, u'created_at': u'2014-02-19T11:02:31.000000', u'updated_at': None, u'project_id': u'2bd766a095b44486bf07cf7f666997eb', u'id': 1, u'name': u'default'}], u'disable_terminate': False, u'root_device_name': None, u'display_name': u'new', u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'default_swap_device': None, u'info_cache': { u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'deleted': False, u'created_at': u'2014-03-05T12:44:00.000000', u'updated_at': None, u'network_info': [], u'deleted_at': None}, u'hostname': u'new', u'launched_on': None, u'display_description': u'new', u'key_data': None, u'deleted': False, u'config_drive': u'', u'power_state': 0, u'default_ephemeral_device': None, u'progress': 0, u'project_id': u'2bd766a095b44486bf07cf7f666997eb', u'launched_at': None, 
u'scheduled_at': None, u'node': None, u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', u'access_ip_v6': None, u'access_ip_v4': None, u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', u'key_name': None, u'updated_at': None, u'host': None, u'root_gb': 1, u'user_id': u'0a757cd896b64b65ba3784afef564116', u'system_metadata': { u'image_kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', u'image_min_disk': u'1', u'instance_type_memory_mb': u'512', u'instance_type_swap': u'0', u'instance_type_vcpu_weight': None, u'instance_type_root_gb': u'1', u'instance_type_name': u'm1.tiny', u'image_ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', u'instance_type_id': u'2', u'instance_type_ephemeral_gb': u'0', u'instance_type_rxtx_factor': u'1.0', u'instance_type_flavorid': u'1', u'instance_type_vcpus': u'1', u'image_container_format': u'ami', u'image_min_ram': u'0', u'image_disk_format': u'ami', u'image_base_image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'}, u'task_state': u'scheduling', u'shutdown_terminate': False, u'cell_name': None, u'ephemeral_key_uuid': None, u'locked': False, u'name': u'instance-0000000f', u'created_at': u'2014-03-05T12:44:00.000000', u'locked_by': None, u'launch_index': 0, u'memory_mb': 512, u'vcpus': 1, u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', u'architecture': None, u'auto_disk_config': False, u'os_type': None, u'metadata': {u'metering.server_group': u'Group_A', u'AutoScalingGroupName': u'tyky-Group_Awste7', u'metering.foo.bar': u'true'}}, u'security_group': [u'default'], u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}}, u'priority': u'INFO', u'_context_is_admin': True, u'_context_timestamp': u'2014-03-05T12:44:00.135674', u'publisher_id': u'scheduler.eglynn-f19-devstack3', u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd', u'_context_remote_address': u'172.16.12.21', u'_context_roles': [u'_member_', u'admin'], u'timestamp': u'2014-03-05 12:44:00.733758', u'_context_user': u'0a757cd896b64b65ba3784afef564116', u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2', u'_context_project_name': u'admin', u'_context_read_deleted': u'no', u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb', u'_context_instance_lock_checked': False, u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb', u'_context_user_name': u'admin' } class TestNotifications(base.BaseTestCase): def test_process_notification(self): info = list(instance.Instance(None).process_notification( INSTANCE_CREATE_END ))[0] for name, actual, expected in [ ('counter_name', info.name, 'instance'), ('counter_type', info.type, sample.TYPE_GAUGE), ('counter_volume', info.volume, 1), ('timestamp', info.timestamp, INSTANCE_CREATE_END['timestamp']), ('resource_id', info.resource_id, INSTANCE_CREATE_END['payload']['instance_id']), ('instance_type_id', info.resource_metadata['instance_type_id'], INSTANCE_CREATE_END['payload']['instance_type_id']), ('host', info.resource_metadata['host'], INSTANCE_CREATE_END['publisher_id']), ]: self.assertEqual(expected, actual, name) @staticmethod def _find_counter(counters, name): return filter(lambda counter: counter.name == name, counters)[0] def _verify_user_metadata(self, metadata): self.assertIn('user_metadata', metadata) user_meta = metadata['user_metadata'] self.assertEqual('Group_A', user_meta.get('server_group')) self.assertNotIn('AutoScalingGroupName', user_meta) self.assertIn('foo_bar', user_meta) self.assertNotIn('foo.bar', user_meta) def test_instance_create_instance(self): ic = instance.Instance(None) counters = 
list(ic.process_notification(INSTANCE_CREATE_END)) self.assertEqual(1, len(counters)) c = counters[0] self.assertEqual(1, c.volume) def test_instance_exists_instance(self): ic = instance.Instance(None) counters = list(ic.process_notification(INSTANCE_EXISTS)) self.assertEqual(1, len(counters)) def test_instance_exists_metadata_list(self): ic = instance.Instance(None) counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST)) self.assertEqual(1, len(counters)) def test_instance_delete_instance(self): ic = instance.Instance(None) counters = list(ic.process_notification(INSTANCE_DELETE_START)) self.assertEqual(1, len(counters)) def test_instance_finish_resize_instance(self): ic = instance.Instance(None) counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) self.assertEqual(1, len(counters)) c = counters[0] self.assertEqual(1, c.volume) self._verify_user_metadata(c.resource_metadata) def test_instance_resize_finish_instance(self): ic = instance.Instance(None) counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) self.assertEqual(1, len(counters)) c = counters[0] self.assertEqual(1, c.volume) self._verify_user_metadata(c.resource_metadata) def test_instance_scheduled(self): ic = instance.InstanceScheduled(None) self.assertIn(INSTANCE_SCHEDULED['event_type'], ic.event_types) counters = list(ic.process_notification(INSTANCE_SCHEDULED)) self.assertEqual(1, len(counters)) names = [c.name for c in counters] self.assertEqual(['instance.scheduled'], names) rid = [c.resource_id for c in counters] self.assertEqual(['fake-uuid1-1'], rid) ceilometer-6.1.5/ceilometer/tests/unit/compute/__init__.py0000664000567000056710000000000013072744703025047 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/test_sample.py0000664000567000056710000000523613072744703024174 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
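# A hedged summary of what the TestSample assertions below pin down (read
# off the tests themselves, not from sample.py): when the notification
# payload is a list, Sample.from_notification keeps only the envelope
# fields as resource_metadata, e.g.
#
#     {'event_type': msg['event_type'], 'host': msg['publisher_id']}
#
# while a dict payload is carried over wholesale with those two envelope
# fields merged into it.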
"""Tests for ceilometer/sample.py""" import datetime from ceilometer import sample from ceilometer.tests import base class TestSample(base.BaseTestCase): SAMPLE = sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), resource_metadata={} ) def test_sample_string_format(self): expected = ('') self.assertEqual(expected, str(self.SAMPLE)) def test_sample_from_notifications_list(self): msg = { 'event_type': u'sample.create', 'timestamp': u'2015-06-1909: 19: 35.786893', 'payload': [{u'counter_name': u'instance100'}], 'priority': 'info', 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) expected = {'event_type': msg['event_type'], 'host': msg['publisher_id']} self.assertEqual(expected, s.resource_metadata) def test_sample_from_notifications_dict(self): msg = { 'event_type': u'sample.create', 'timestamp': u'2015-06-1909: 19: 35.786893', 'payload': {u'counter_name': u'instance100'}, 'priority': 'info', 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) msg['payload']['event_type'] = msg['event_type'] msg['payload']['host'] = msg['publisher_id'] self.assertEqual(msg['payload'], s.resource_metadata) ceilometer-6.1.5/ceilometer/tests/unit/test_neutronclient.py0000664000567000056710000001714213072744706025606 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslotest import base from ceilometer import neutron_client class TestNeutronClient(base.BaseTestCase): def setUp(self): super(TestNeutronClient, self).setUp() self.nc = neutron_client.Client() self.nc.lb_version = 'v1' @staticmethod def fake_ports_list(): return {'ports': [{'admin_state_up': True, 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'device_owner': 'network:router_gateway', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, ]} def test_port_get_all(self): with mock.patch.object(self.nc.client, 'list_ports', side_effect=self.fake_ports_list): ports = self.nc.port_get_all() self.assertEqual(1, len(ports)) self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', ports[0]['id']) @staticmethod def fake_networks_list(): return {'networks': [{'admin_state_up': True, 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'name': 'public', 'provider:network_type': 'gre', 'provider:physical_network': None, 'provider:segmentation_id': 2, 'router:external': True, 'shared': False, 'status': 'ACTIVE', 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, ]} @staticmethod def fake_pool_list(): return {'pools': [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ]} def test_pool_list(self): with mock.patch.object(self.nc.client, 'list_pools', side_effect=self.fake_pool_list): pools = self.nc.pool_get_all() self.assertEqual(1, len(pools)) self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', pools[0]['id']) @staticmethod def fake_vip_list(): return {'vips': [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, ]} def test_vip_list(self): with mock.patch.object(self.nc.client, 'list_vips', side_effect=self.fake_vip_list): vips = self.nc.vip_get_all() self.assertEqual(1, len(vips)) self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', vips[0]['id']) @staticmethod def fake_member_list(): return {'members': [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, ]} def test_member_list(self): with mock.patch.object(self.nc.client, 'list_members', side_effect=self.fake_member_list): members = self.nc.member_get_all() self.assertEqual(1, len(members)) self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', members[0]['id']) @staticmethod def fake_monitors_list(): return {'health_monitors': [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 
'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }]} def test_monitor_list(self): with mock.patch.object(self.nc.client, 'list_health_monitors', side_effect=self.fake_monitors_list): monitors = self.nc.health_monitor_get_all() self.assertEqual(1, len(monitors)) self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', monitors[0]['id']) @staticmethod def fake_pool_stats(fake_pool): return {'stats': [{'active_connections': 1, 'total_connections': 2, 'bytes_in': 3, 'bytes_out': 4 }]} def test_pool_stats(self): with mock.patch.object(self.nc.client, 'retrieve_pool_stats', side_effect=self.fake_pool_stats): stats = self.nc.pool_stats('fake_pool')['stats'] self.assertEqual(1, len(stats)) self.assertEqual(1, stats[0]['active_connections']) self.assertEqual(2, stats[0]['total_connections']) self.assertEqual(3, stats[0]['bytes_in']) self.assertEqual(4, stats[0]['bytes_out']) def test_v1_list_loadbalancer_returns_empty_list(self): self.assertEqual([], self.nc.list_loadbalancer()) def test_v1_list_listener_returns_empty_list(self): self.assertEqual([], self.nc.list_listener()) ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/0000775000567000056710000000000013072745164023424 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/test_gnocchi.py0000664000567000056710000004062113072744706026453 0ustar jenkinsjenkins00000000000000# # Copyright 2014 eNovance # # Authors: Mehdi Abaakouk # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
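# The dispatcher tests below are generated as a cross-product: every sample
# scenario (what gets dispatched) is combined with every workflow scenario
# (which gnocchi calls succeed or fail). A minimal sketch of the same
# testscenarios mechanism, with illustrative names:
#
#     import testscenarios
#     scenarios = testscenarios.multiply_scenarios(
#         [('a', {'x': 1}), ('b', {'x': 2})],
#         [('ok', {'y': True}), ('fail', {'y': False})])
#     # -> four scenarios named 'a,ok', 'a,fail', 'b,ok', 'b,fail'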
import os import uuid from gnocchiclient import exceptions as gnocchi_exc import mock from oslo_config import fixture as config_fixture from oslo_utils import fileutils from oslotest import mockpatch import requests import six import testscenarios from ceilometer.dispatcher import gnocchi from ceilometer import service as ceilometer_service from ceilometer.tests import base load_tests = testscenarios.load_tests_apply_scenarios @mock.patch('gnocchiclient.v1.client.Client', mock.Mock()) class DispatcherTest(base.BaseTestCase): def setUp(self): super(DispatcherTest, self).setUp() self.conf = self.useFixture(config_fixture.Config()) ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf.config( resources_definition_file=self.path_get( 'etc/ceilometer/gnocchi_resources.yaml'), group="dispatcher_gnocchi" ) self.resource_id = str(uuid.uuid4()) self.samples = [{ 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_id': self.resource_id, 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }}, { 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2014-05-08 20:23:48.028195', 'resource_id': self.resource_id, 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } }] ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.conf.conf.dispatcher_gnocchi.filter_service_activity = True def test_config_load(self): self.conf.config(filter_service_activity=False, group='dispatcher_gnocchi') d = gnocchi.GnocchiDispatcher(self.conf.conf) names = [rd.cfg['resource_type'] for rd in d.resources_definition] self.assertIn('instance', names) self.assertIn('volume', names) @mock.patch('ceilometer.dispatcher.gnocchi.LOG') def test_broken_config_load(self, mylog): contents = [("---\n" "resources:\n" " - resource_type: foobar\n"), ("---\n" "resources:\n" " - resource_type: 0\n"), ("---\n" "resources:\n" " - sample_types: ['foo', 'bar']\n"), ("---\n" "resources:\n" " - sample_types: foobar\n" " - resource_type: foobar\n"), ] for content in contents: if six.PY3: content = content.encode('utf-8') temp = fileutils.write_to_tempfile(content=content, prefix='gnocchi_resources', suffix='.yaml') self.addCleanup(os.remove, temp) self.conf.config(filter_service_activity=False, resources_definition_file=temp, group='dispatcher_gnocchi') d = gnocchi.GnocchiDispatcher(self.conf.conf) self.assertTrue(mylog.error.called) self.assertEqual(0, len(d.resources_definition)) @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' '._process_resource') def _do_test_activity_filter(self, expected_samples, fake_process_resource): def assert_samples(resource_id, metric_grouped_samples): samples = [] for metric_name, s in metric_grouped_samples: samples.extend(list(s)) self.assertEqual(expected_samples, samples) fake_process_resource.side_effect = assert_samples d = gnocchi.GnocchiDispatcher(self.conf.conf) d.record_metering_data(self.samples) fake_process_resource.assert_called_with(self.resource_id, mock.ANY) def 
test_activity_filter_match_project_id(self): self.samples[0]['project_id'] = ( 'a2d42c23-d518-46b6-96ab-3fba2e146859') self._do_test_activity_filter([self.samples[1]]) def test_activity_filter_match_swift_event(self): self.samples[0]['counter_name'] = 'storage.api.request' self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' self._do_test_activity_filter([self.samples[1]]) def test_activity_filter_nomatch(self): self._do_test_activity_filter(self.samples) class MockResponse(mock.NonCallableMock): def __init__(self, code): text = {500: 'Internal Server Error', 404: 'Not Found', 204: 'Created', 409: 'Conflict', }.get(code) super(MockResponse, self).__init__(spec=requests.Response, status_code=code, text=text) class DispatcherWorkflowTest(base.BaseTestCase, testscenarios.TestWithScenarios): sample_scenarios = [ ('disk.root.size', dict( sample={ 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } }, measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': '2' }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ 'host': 'foo', 'image_ref': 'imageref!', 'flavor_id': 1234, 'display_name': 'myinstance', }, metric_names=[ 'instance', 'disk.root.size', 'disk.ephemeral.size', 'memory', 'vcpus', 'memory.usage', 'memory.resident', 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', 'disk.read.requests.rate', 'disk.write.requests', 'disk.write.requests.rate', 'disk.read.bytes', 'disk.read.bytes.rate', 'disk.write.bytes', 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', 'disk.capacity', 'disk.allocation', 'disk.usage'], resource_type='instance')), ('hardware.ipmi.node.power', dict( sample={ 'counter_name': 'hardware.ipmi.node.power', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_metadata': { 'useless': 'not_used', } }, measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': '2' }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ }, metric_names=[ 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', 'hardware.ipmi.node.inlet_temperature', 'hardware.ipmi.node.outlet_temperature', 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' ], resource_type='ipmi')), ] worflow_scenarios = [ ('normal_workflow', dict(measure=204, post_resource=None, metric=None, measure_retry=None, patch_resource=204)), ('new_resource', dict(measure=404, post_resource=204, metric=None, measure_retry=204, patch_resource=204)), ('new_resource_fail', dict(measure=404, post_resource=500, metric=None, measure_retry=None, patch_resource=None)), ('resource_update_fail', dict(measure=204, post_resource=None, metric=None, measure_retry=None, patch_resource=500)), ('new_metric', dict(measure=404, post_resource=None, metric=204, measure_retry=204, patch_resource=204)), ('new_metric_fail', dict(measure=404, post_resource=None, metric=500, measure_retry=None, patch_resource=None)), 
('retry_fail', dict(measure=404, post_resource=409, metric=None, measure_retry=500, patch_resource=None)), ('measure_fail', dict(measure=500, post_resource=None, metric=None, measure_retry=None, patch_resource=None)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, cls.worflow_scenarios) def setUp(self): super(DispatcherWorkflowTest, self).setUp() self.conf = self.useFixture(config_fixture.Config()) # Set this explicitly to avoid conflicts with any existing # configuration. self.conf.config(url='http://localhost:8041', group='dispatcher_gnocchi') ks_client = mock.Mock() ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.ks_client = ks_client ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf.config( resources_definition_file=self.path_get( 'etc/ceilometer/gnocchi_resources.yaml'), group="dispatcher_gnocchi" ) self.sample['resource_id'] = str(uuid.uuid4()) + "/foobar" @mock.patch('ceilometer.dispatcher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test_workflow(self, fakeclient_cls, logger): self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) fakeclient = fakeclient_cls.return_value # FIXME(sileht): we don't use urlparse.quote here # to ensure / is converted in %2F # temporary disabled until we find a solution # on gnocchi side. Current gnocchiclient doesn't # encode the resource_id resource_id = self.sample['resource_id'] # .replace("/", "%2F"), metric_name = self.sample['counter_name'] expected_calls = [ mock.call.capabilities.list(), mock.call.metric.add_measures(metric_name, self.measures_attributes, resource_id)] add_measures_side_effect = [] if self.measure == 404 and self.post_resource: add_measures_side_effect += [ gnocchi_exc.ResourceNotFound(404)] elif self.measure == 404 and self.metric: add_measures_side_effect += [ gnocchi_exc.MetricNotFound(404)] elif self.measure == 500: add_measures_side_effect += [Exception('boom!')] if self.post_resource: attributes = self.postable_attributes.copy() attributes.update(self.patchable_attributes) attributes['id'] = self.sample['resource_id'] attributes['metrics'] = dict((metric_name, {}) for metric_name in self.metric_names) expected_calls.append(mock.call.resource.create( self.resource_type, attributes)) if self.post_resource == 409: fakeclient.resource.create.side_effect = [ gnocchi_exc.ResourceAlreadyExists(409)] elif self.post_resource == 500: fakeclient.resource.create.side_effect = [Exception('boom!')] if self.metric: expected_calls.append(mock.call.metric.create({ 'name': self.sample['counter_name'], 'resource_id': resource_id})) if self.metric == 409: fakeclient.metric.create.side_effect = [ gnocchi_exc.NamedMetricAreadyExists(409)] elif self.metric == 500: fakeclient.metric.create.side_effect = [Exception('boom!')] if self.measure_retry: expected_calls.append(mock.call.metric.add_measures( metric_name, self.measures_attributes, resource_id)) if self.measure_retry == 204: add_measures_side_effect += [None] elif self.measure_retry == 500: add_measures_side_effect += [ Exception('boom!')] else: add_measures_side_effect += [None] if self.patch_resource and self.patchable_attributes: expected_calls.append(mock.call.resource.update( self.resource_type, resource_id, self.patchable_attributes)) if self.patch_resource == 500: fakeclient.resource.update.side_effect = 
[Exception('boom!')] fakeclient.metric.add_measures.side_effect = add_measures_side_effect self.dispatcher.record_metering_data([self.sample]) # Check that the last log message is the expected one if (self.measure == 500 or self.measure_retry == 500 or self.metric == 500 or self.post_resource == 500 or (self.patch_resource == 500 and self.patchable_attributes)): logger.error.assert_called_with('boom!', exc_info=True) elif self.patch_resource == 204 and self.patchable_attributes: logger.debug.assert_called_with( 'Resource %s updated', self.sample['resource_id']) self.assertEqual(0, logger.error.call_count) elif self.measure == 200: logger.debug.assert_called_with( "Measure posted on metric %s of resource %s", self.sample['counter_name'], self.sample['resource_id']) self.assertEqual(0, logger.error.call_count) self.assertEqual(expected_calls, fakeclient.mock_calls) DispatcherWorkflowTest.generate_scenarios() ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/test_db.py0000664000567000056710000001261713072744706025432 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import mock from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.dispatcher import database from ceilometer.event.storage import models as event_models from ceilometer.publisher import utils class TestDispatcherDB(base.BaseTestCase): def setUp(self): super(TestDispatcherDB, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('connection', 'sqlite://', group='database') self.dispatcher = database.DatabaseDispatcher(self.CONF) self.ctx = None def test_event_conn(self): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}) event = utils.message_from_event(event, self.CONF.publisher.telemetry_secret) with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[0][0][0])) @mock.patch('ceilometer.publisher.utils.verify_signature') def test_event_with_bad_signature(self, mocked_verify): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}).serialize() def _fake_verify(ev, secret): if ev.get('message_signature') == 'bad_signature': return False return True mocked_verify.side_effect = _fake_verify with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: event['message_signature'] = 'bad_signature' self.dispatcher.record_events(event) self.assertEqual([], record_events.call_args_list[0][0][0]) del event['message_signature'] event['message_signature'] = utils.compute_signature( event, self.CONF.publisher.telemetry_secret) self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[1][0][0])) def test_valid_message(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = 
utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) with mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(msg) def test_invalid_message(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'message_signature': 'invalid-signature'} class ErrorConnection(object): called = False def record_metering_data(self, data): self.called = True self.dispatcher._meter_conn = ErrorConnection() self.dispatcher.record_metering_data(msg) if self.dispatcher.meter_conn.called: self.fail('Should not have called the storage connection') def test_timestamp_conversion(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'timestamp': '2012-07-02T13:53:40Z', } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) expected = msg.copy() expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) with mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(expected) def test_timestamp_tzinfo_conversion(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'timestamp': '2012-09-30T15:31:50.262-08:00', } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) expected = msg.copy() expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, 31, 50, 262000) with mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(expected) ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/test_http.py0000664000567000056710000001062713072744706026023 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
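# The HTTP dispatcher tests below never touch the network: requests.post is
# patched out and the assertions inspect call_count and call_args instead.
# A standalone sketch of the pattern (illustrative, not part of the
# module):
#
#     import mock
#     import requests
#     with mock.patch.object(requests, 'post') as post:
#         requests.post('http://example.invalid/fake', json={})
#         assert post.call_count == 1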
import datetime import uuid import mock from oslo_config import fixture as fixture_config from oslotest import base import requests from ceilometer.dispatcher import http from ceilometer.event.storage import models as event_models from ceilometer.publisher import utils class TestDispatcherHttp(base.BaseTestCase): def setUp(self): super(TestDispatcherHttp, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } self.msg['message_signature'] = utils.compute_signature( self.msg, self.CONF.publisher.telemetry_secret, ) def test_http_dispatcher_config_options(self): self.CONF.dispatcher_http.target = 'fake' self.CONF.dispatcher_http.timeout = 2 dispatcher = http.HttpDispatcher(self.CONF) self.assertEqual('fake', dispatcher.target) self.assertEqual(2, dispatcher.timeout) def test_http_dispatcher_with_no_target(self): self.CONF.dispatcher_http.target = '' dispatcher = http.HttpDispatcher(self.CONF) # The target should be None self.assertEqual('', dispatcher.target) with mock.patch.object(requests, 'post') as post: dispatcher.record_metering_data(self.msg) # Since the target is not set, no http post should occur, thus the # call_count should be zero. self.assertEqual(0, post.call_count) def test_http_dispatcher_with_no_metadata(self): self.CONF.dispatcher_http.target = 'fake' dispatcher = http.HttpDispatcher(self.CONF) with mock.patch.object(requests, 'post') as post: dispatcher.record_metering_data(self.msg) self.assertEqual(1, post.call_count) class TestEventDispatcherHttp(base.BaseTestCase): def setUp(self): super(TestEventDispatcherHttp, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_http_dispatcher(self): self.CONF.dispatcher_http.event_target = 'fake' dispatcher = http.HttpDispatcher(self.CONF) event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}) event = utils.message_from_event(event, self.CONF.publisher.telemetry_secret) with mock.patch.object(requests, 'post') as post: dispatcher.record_events(event) self.assertEqual(1, post.call_count) def test_http_dispatcher_bad(self): self.CONF.dispatcher_http.event_target = '' dispatcher = http.HttpDispatcher(self.CONF) event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}) event = utils.message_from_event(event, self.CONF.publisher.telemetry_secret) with mock.patch('ceilometer.dispatcher.http.LOG', mock.MagicMock()) as LOG: dispatcher.record_events(event) self.assertTrue(LOG.exception.called) def test_http_dispatcher_share_target(self): self.CONF.dispatcher_http.target = 'fake' dispatcher = http.HttpDispatcher(self.CONF) event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}) event = utils.message_from_event(event, self.CONF.publisher.telemetry_secret) with mock.patch.object(requests, 'post') as post: dispatcher.record_events(event) self.assertEqual('fake', post.call_args[0][0]) ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/test_file.py0000664000567000056710000000710013072744706025753 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging.handlers import os import tempfile from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.dispatcher import file from ceilometer.publisher import utils class TestDispatcherFile(base.BaseTestCase): def setUp(self): super(TestDispatcherFile, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_file_dispatcher_with_all_config(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = 50 self.CONF.dispatcher_file.backup_count = 5 dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be RotatingFileHandler handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.record_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_path_only(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = 0 self.CONF.dispatcher_file.backup_count = 0 dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be RotatingFileHandler handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.FileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.record_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_no_path(self): self.CONF.dispatcher_file.file_path = None dispatcher = file.FileDispatcher(self.CONF) # The log should be None self.assertIsNone(dispatcher.log) ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/test_dispatcher.py0000664000567000056710000000342413072744706027167 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture from oslotest import mockpatch from ceilometer import dispatcher from ceilometer.tests import base class FakeDispatcherSample(dispatcher.MeterDispatcherBase): def record_metering_data(self, data): pass class FakeDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): def record_metering_data(self, data): pass def record_events(self, events): pass class TestDispatchManager(base.BaseTestCase): def setUp(self): super(TestDispatchManager, self).setUp() self.conf = self.useFixture(fixture.Config()) self.conf.config(meter_dispatchers=['database', 'gnocchi'], event_dispatchers=['database']) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.gnocchi.GnocchiDispatcher', new=FakeDispatcherSample)) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.database.DatabaseDispatcher', new=FakeDispatcher)) def test_load(self): sample_mg, event_mg = dispatcher.load_dispatcher_manager() self.assertEqual(2, len(list(sample_mg))) self.assertEqual(1, len(list(event_mg))) ceilometer-6.1.5/ceilometer/tests/unit/dispatcher/__init__.py0000664000567000056710000000000013072744703025521 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/publisher/0000775000567000056710000000000013072745164023273 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/publisher/test_messaging_publisher.py0000664000567000056710000002705413072744706030747 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
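# The policy tests below encode the publisher's failure semantics: with
# policy=drop a failed send discards the batch, with policy=queue it is
# retained in local_queue, and the queue is trimmed from the oldest end
# once it exceeds max_queue_length (1024 by default, per the assertions
# further down). A sketch of that trimming rule, with illustrative names:
#
#     queue.append(batch)
#     if len(queue) > max_queue_length:
#         queue = queue[-max_queue_length:]  # keep the newest entries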
"""Tests for ceilometer/publisher/messaging.py """ import datetime import uuid import mock from oslo_config import fixture as fixture_config from oslo_utils import netutils import testscenarios.testcase from ceilometer.event.storage import models as event from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base class BasePublisherTestCase(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_sample_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super(BasePublisherTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) class NotifierOnlyPublisherTest(BasePublisherTestCase): @mock.patch('oslo_messaging.Notifier') def test_publish_topic_override(self, notifier): msg_publisher.SampleNotifierPublisher( netutils.urlsplit('notifier://?topic=custom_topic')) notifier.assert_called_with(mock.ANY, topic='custom_topic', driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) msg_publisher.EventNotifierPublisher( netutils.urlsplit('notifier://?topic=custom_event_topic')) notifier.assert_called_with(mock.ANY, topic='custom_event_topic', driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) class TestPublisher(testscenarios.testcase.WithScenarios, BasePublisherTestCase): scenarios = [ ('notifier', dict(protocol="notifier", publisher_cls=msg_publisher.SampleNotifierPublisher, test_data=BasePublisherTestCase.test_sample_data, pub_func='publish_samples', attr='source')), ('event_notifier', dict(protocol="notifier", publisher_cls=msg_publisher.EventNotifierPublisher, test_data=BasePublisherTestCase.test_event_data, pub_func='publish_events', attr='event_type')), ] def setUp(self): super(TestPublisher, self).setUp() self.topic = (self.CONF.publisher_notifier.event_topic if self.pub_func == 'publish_events' else self.CONF.publisher_notifier.metering_topic) class TestPublisherPolicy(TestPublisher): @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_no_policy(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: 
fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_block(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=default' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_incorrect(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=notexist' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.warning.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) class TestPublisherPolicyReactions(TestPublisher): def test_published_with_policy_drop_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=drop' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down_up(self): self.rpc_unreachable = True publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.side_effect = mock.MagicMock() getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) topic = self.topic expected = [mock.call(mock.ANY, topic, mock.ANY), mock.call(mock.ANY, topic, mock.ANY), mock.call(mock.ANY, topic, mock.ANY)] self.assertEqual(expected, fake_send.mock_calls) def test_published_with_policy_sized_queue_and_rpc_down(self): publisher = self.publisher_cls(netutils.urlsplit( 
'%s://?policy=queue&max_queue_length=3' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 5): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(3, len(publisher.local_queue)) self.assertEqual( 'test-2', publisher.local_queue[0][2][0][self.attr] ) self.assertEqual( 'test-3', publisher.local_queue[1][2][0][self.attr] ) self.assertEqual( 'test-4', publisher.local_queue[2][2][0][self.attr] ) def test_published_with_policy_default_sized_queue_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 2000): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual( 'test-976', publisher.local_queue[0][2][0][self.attr] ) self.assertEqual( 'test-1999', publisher.local_queue[1023][2][0][self.attr] ) ceilometer-6.1.5/ceilometer/tests/unit/publisher/test_utils.py0000664000567000056710000001245313072744706026052 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
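# Editorial sketch: the signature tests below depend on two properties of
# utils.compute_signature: the 'message_signature' key is excluded from the
# digest, and changing any key or value changes the result. A self-contained
# sketch of such a scheme (illustrative only; the digest algorithm chosen
# here is an assumption, the real code lives in
# ceilometer/publisher/utils.py):
import hashlib
import hmac


def _example_signature(message, secret):
    # HMAC over the sorted key/value pairs, skipping the signature field
    # itself so a signed message can be re-verified.
    digest = hmac.new(secret.encode('utf-8'), digestmod=hashlib.sha256)
    for name, value in sorted(message.items()):
        if name != 'message_signature':
            digest.update(('%s=%s' % (name, value)).encode('utf-8'))
    return digest.hexdigest()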
"""Tests for ceilometer/publisher/utils.py """ from oslo_serialization import jsonutils from oslotest import base from ceilometer.publisher import utils class TestSignature(base.BaseTestCase): def test_compute_signature_change_key(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_change_value(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_same(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 sig2 = utils.compute_signature(data, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_use_configured_secret(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') sig2 = utils.compute_signature(data, 'different-value') self.assertNotEqual(sig1, sig2) def test_verify_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unsigned(self): data = {'a': 'A', 'b': 'B'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_incorrect(self): data = {'a': 'A', 'b': 'B', 'message_signature': 'Not the same'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_invalid_encoding(self): data = {'a': 'A', 'b': 'B', 'message_signature': ''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unicode(self): data = {'a': 'A', 'b': 'B', 'message_signature': u''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested_json(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'c': ('c',), 'd': ['d'] }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = jsonutils.loads(jsonutils.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_unicode_symbols(self): data = {u'a\xe9\u0437': 'A', 'b': u'B\xe9\u0437' } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = jsonutils.loads(jsonutils.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_besteffort_compare_digest(self): hash1 = "f5ac3fe42b80b80f979825d177191bc5" hash2 = "f5ac3fe42b80b80f979825d177191bc5" hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" hash4 = b"f5ac3fe42b80b80f979825d177191bc5" hash5 = b"f5ac3fe42b80b80f979825d177191bc5" hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) self.assertFalse(utils.besteffort_compare_digest(hash4, 
hash6)) def test_verify_no_secret(self): data = {'a': 'A', 'b': 'B'} self.assertTrue(utils.verify_signature(data, '')) ceilometer-6.1.5/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py0000664000567000056710000002124113072744706031403 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Cisco Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/kafka_broker.py """ import datetime import uuid import mock from oslo_utils import netutils from ceilometer.event.storage import models as event from ceilometer.publisher import kafka_broker as kafka from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base @mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) @mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', mock.Mock()) class TestKafkaPublisher(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_publish(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_without_options(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def 
test_publish_to_host_without_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) self.assertEqual('default', publisher.policy) publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) self.assertEqual('default', publisher.policy) def test_publish_to_host_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = TypeError self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_samples, mock.MagicMock(), self.test_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_drop_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_queue_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(1, len(publisher.local_queue)) def test_publish_to_down_host_with_default_queue_size(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 2000): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual('test-976', publisher.local_queue[0][2][0]['counter_name']) self.assertEqual('test-1999', publisher.local_queue[1023][2][0]['counter_name']) def test_publish_to_host_from_down_to_up_with_queue(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 16): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(16, len(publisher.local_queue)) fake_producer.send_messages.side_effect = None for s in self.test_data: s.name = 'test-%d' % 16 publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) def test_publish_event_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_events(mock.MagicMock(), self.test_event_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) with mock.patch.object(publisher, '_producer') as fake_producer: 
fake_producer.send_messages.side_effect = Exception("test") self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_events, mock.MagicMock(), self.test_event_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) ceilometer-6.1.5/ceilometer/tests/unit/publisher/test_udp.py0000664000567000056710000001315413072744706025501 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/udp.py """ import datetime import socket import mock import msgpack from oslo_config import fixture as fixture_config from oslo_utils import netutils from oslotest import base from ceilometer.publisher import udp from ceilometer.publisher import utils from ceilometer import sample COUNTER_SOURCE = 'testsource' class TestUDPPublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] @staticmethod def _make_fake_socket(published): def _fake_socket_socket(family, type): def record_data(msg, dest): published.append((msg, dest)) udp_socket = mock.Mock() udp_socket.sendto = record_data return udp_socket return _fake_socket_socket def setUp(self): super(TestUDPPublisher, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.publisher.telemetry_secret = 'not-so-secret' def _check_udp_socket(self, url, expected_addr_family): with mock.patch.object(socket, 'socket') as mock_socket: udp.UDPPublisher(netutils.urlsplit(url)) mock_socket.assert_called_with(expected_addr_family, socket.SOCK_DGRAM) def test_publisher_udp_socket_ipv4(self): self._check_udp_socket('udp://127.0.0.1:4952', socket.AF_INET) def test_publisher_udp_socket_ipv6(self): self._check_udp_socket('udp://[::1]:4952', 
socket.AF_INET6) def test_published(self): self.data_sent = [] with mock.patch('socket.socket', self._make_fake_socket(self.data_sent)): publisher = udp.UDPPublisher( netutils.urlsplit('udp://somehost')) publisher.publish_samples(None, self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data, dest in self.data_sent: counter = msgpack.loads(data, encoding="utf-8") sent_counters.append(counter) # Check destination self.assertEqual(('somehost', self.CONF.collector.udp_port), dest) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret") for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) @staticmethod def _raise_ioerror(*args): raise IOError def _make_broken_socket(self, family, type): udp_socket = mock.Mock() udp_socket.sendto = self._raise_ioerror return udp_socket def test_publish_error(self): with mock.patch('socket.socket', self._make_broken_socket): publisher = udp.UDPPublisher( netutils.urlsplit('udp://localhost')) publisher.publish_samples(None, self.test_data) ceilometer-6.1.5/ceilometer/tests/unit/publisher/test_file.py0000664000567000056710000001045313072744706025627 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/file.py """ import datetime import logging.handlers import os import tempfile from oslo_utils import netutils from oslotest import base from ceilometer.publisher import file from ceilometer import sample class TestFilePublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_file_publisher_maxbytes(self): # Test valid configurations tempdir = tempfile.mkdtemp() name = '%s/log_file' % tempdir parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' % name) publisher = file.FilePublisher(parsed_url) publisher.publish_samples(None, self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([50, name, 3], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # The rotating file gets created since only 50 bytes are allowed per file.
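# (Editorial note: logging.handlers.RotatingFileHandler rolls over as soon
# as a record would push the current file past maxBytes, so with
# max_bytes=50 the handful of samples published above is already enough to
# make the first backup file, log_file.1, appear.)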
self.assertTrue(os.path.exists('%s.1' % name)) def test_file_publisher(self): # Test missing max bytes, backup count configurations tempdir = tempfile.mkdtemp() name = '%s/log_file_plain' % tempdir parsed_url = netutils.urlsplit('file://%s' % name) publisher = file.FilePublisher(parsed_url) publisher.publish_samples(None, self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([0, name, 0], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # Test that the content is correctly saved in the file self.assertTrue(os.path.exists(name)) with open(name, 'r') as f: content = f.read() for sample_item in self.test_data: self.assertIn(sample_item.id, content) self.assertIn(sample_item.timestamp, content) def test_file_publisher_invalid(self): # Test invalid max bytes, backup count configurations tempdir = tempfile.mkdtemp() parsed_url = netutils.urlsplit( 'file://%s/log_file_bad' '?max_bytes=yus&backup_count=5y' % tempdir) publisher = file.FilePublisher(parsed_url) publisher.publish_samples(None, self.test_data) self.assertIsNone(publisher.publisher_logger) ceilometer-6.1.5/ceilometer/tests/unit/publisher/__init__.py0000664000567000056710000000000013072744703025370 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/test_coordination.py0000664000567000056710000002441413072744706025405 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
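# Editorial sketch: the partitioning tests below check that every agent
# ends up with exactly the resources its position on a shared hash ring
# assigns to it. The core idea, stated as a tiny helper; the function name
# is invented here and the real logic lives in
# coordination.PartitionCoordinator.extract_my_subset:
def _example_extract_my_subset(hash_ring, my_member_id, resources):
    # Keep only the resources the ring maps to this member; every member
    # runs the same computation, so the subsets partition the whole set.
    return [r for r in resources
            if hash_ring.get_node(str(r)) == my_member_id]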
import logging import mock from oslo_config import fixture as fixture_config import tooz.coordination from ceilometer import coordination from ceilometer.tests import base from ceilometer import utils class MockToozCoordinator(object): def __init__(self, member_id, shared_storage): self._member_id = member_id self._groups = shared_storage self.is_started = False def start(self): self.is_started = True def stop(self): pass def heartbeat(self): pass def create_group(self, group_id): if group_id in self._groups: return MockAsyncError( tooz.coordination.GroupAlreadyExist(group_id)) self._groups[group_id] = {} return MockAsyncResult(None) def join_group(self, group_id, capabilities=b''): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) if self._member_id in self._groups[group_id]: return MockAsyncError( tooz.coordination.MemberAlreadyExist(group_id, self._member_id)) self._groups[group_id][self._member_id] = { "capabilities": capabilities, } return MockAsyncResult(None) def leave_group(self, group_id): return MockAsyncResult(None) def get_members(self, group_id): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) return MockAsyncResult(self._groups[group_id]) class MockToozCoordExceptionRaiser(MockToozCoordinator): def start(self): raise tooz.coordination.ToozError('error') def heartbeat(self): raise tooz.coordination.ToozError('error') def join_group(self, group_id, capabilities=b''): raise tooz.coordination.ToozError('error') def get_members(self, group_id): raise tooz.coordination.ToozError('error') class MockToozCoordExceptionOnJoinRaiser(MockToozCoordinator): def __init__(self, member_id, shared_storage, retry_count=None): super(MockToozCoordExceptionOnJoinRaiser, self).__init__(member_id, shared_storage) self.tooz_error_count = retry_count self.count = 0 def join_group(self, group_id, capabilities=b''): if self.count == self.tooz_error_count: return MockAsyncResult(None) else: self.count += 1 raise tooz.coordination.ToozError('error') class MockAsyncResult(tooz.coordination.CoordAsyncResult): def __init__(self, result): self.result = result def get(self, timeout=0): return self.result @staticmethod def done(): return True class MockAsyncError(tooz.coordination.CoordAsyncResult): def __init__(self, error): self.error = error def get(self, timeout=0): raise self.error @staticmethod def done(): return True class MockLoggingHandler(logging.Handler): """Mock logging handler to check for expected logs.""" def __init__(self, *args, **kwargs): self.reset() logging.Handler.__init__(self, *args, **kwargs) def emit(self, record): self.messages[record.levelname.lower()].append(record.getMessage()) def reset(self): self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [], 'critical': []} class TestPartitioning(base.BaseTestCase): def setUp(self): super(TestPartitioning, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.str_handler = MockLoggingHandler() coordination.LOG.logger.addHandler(self.str_handler) self.shared_storage = {} def _get_new_started_coordinator(self, shared_storage, agent_id=None, coordinator_cls=None, retry_count=None): coordinator_cls = coordinator_cls or MockToozCoordinator self.CONF.set_override('backend_url', 'xxx://yyy', group='coordination') with mock.patch('tooz.coordination.get_coordinator', lambda _, member_id: coordinator_cls(member_id, shared_storage, retry_count) if retry_count else coordinator_cls(member_id, shared_storage)): pc = 
coordination.PartitionCoordinator(agent_id) pc.start() return pc def _usage_simulation(self, *agents_kwargs): partition_coordinators = [] for kwargs in agents_kwargs: partition_coordinator = self._get_new_started_coordinator( self.shared_storage, kwargs['agent_id'], kwargs.get( 'coordinator_cls')) partition_coordinator.join_group(kwargs['group_id']) partition_coordinators.append(partition_coordinator) for i, kwargs in enumerate(agents_kwargs): all_resources = kwargs.get('all_resources', []) expected_resources = kwargs.get('expected_resources', []) actual_resources = partition_coordinators[i].extract_my_subset( kwargs['group_id'], all_resources) self.assertEqual(expected_resources, actual_resources) def test_single_group(self): agents = [dict(agent_id='agent1', group_id='group'), dict(agent_id='agent2', group_id='group')] self._usage_simulation(*agents) self.assertEqual(['group'], sorted(self.shared_storage.keys())) self.assertEqual(['agent1', 'agent2'], sorted(self.shared_storage['group'].keys())) def test_multiple_groups(self): agents = [dict(agent_id='agent1', group_id='group1'), dict(agent_id='agent2', group_id='group2')] self._usage_simulation(*agents) self.assertEqual(['group1', 'group2'], sorted(self.shared_storage.keys())) def test_partitioning(self): all_resources = ['resource_%s' % i for i in range(1000)] agents = ['agent_%s' % i for i in range(10)] expected_resources = [list() for _ in range(len(agents))] hr = utils.HashRing(agents) for r in all_resources: key = agents.index(hr.get_node(r)) expected_resources[key].append(r) agents_kwargs = [] for i, agent in enumerate(agents): agents_kwargs.append(dict(agent_id=agent, group_id='group', all_resources=all_resources, expected_resources=expected_resources[i])) self._usage_simulation(*agents_kwargs) def test_coordination_backend_offline(self): agents = [dict(agent_id='agent1', group_id='group', all_resources=['res1', 'res2'], expected_resources=[], coordinator_cls=MockToozCoordExceptionRaiser)] self._usage_simulation(*agents) expected_errors = ['Error getting group membership info from ' 'coordination backend.', 'Error connecting to coordination backend.'] for e in expected_errors: self.assertIn(e, self.str_handler.messages['error']) def test_coordination_backend_connection_fail_on_join(self): coord = self._get_new_started_coordinator( {'group'}, 'agent1', MockToozCoordExceptionOnJoinRaiser, retry_count=2) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionOnJoinRaiser): coord.join_group(group_id='group') expected_errors = ['Error joining partitioning group group,' ' re-trying', 'Error joining partitioning group group,' ' re-trying'] self.assertEqual(expected_errors, self.str_handler.messages['error']) def test_reconnect(self): coord = self._get_new_started_coordinator({}, 'a', MockToozCoordExceptionRaiser) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionRaiser('a', {})): coord.heartbeat() expected_errors = ['Error connecting to coordination backend.', 'Error sending a heartbeat to coordination ' 'backend.'] for e in expected_errors: self.assertIn(e, self.str_handler.messages['error']) self.str_handler.messages['error'] = [] with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordinator('a', {})): coord.heartbeat() for e in expected_errors: self.assertNotIn(e, self.str_handler.messages['error']) def test_group_id_none(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) with 
mock.patch.object(coord._coordinator, 'join_group') as mocked: coord.join_group(None) self.assertEqual(0, mocked.call_count) with mock.patch.object(coord._coordinator, 'leave_group') as mocked: coord.leave_group(None) self.assertEqual(0, mocked.call_count) def test_stop(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) coord.join_group("123") coord.stop() self.assertIsEmpty(coord._groups) self.assertIsNone(coord._coordinator) ceilometer-6.1.5/ceilometer/tests/unit/objectstore/0000775000567000056710000000000013072745164023621 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/objectstore/test_rgw_client.py0000664000567000056710000001407313072744706027375 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock from oslotest import base from ceilometer.objectstore.rgw_client import RGWAdminAPIFailed from ceilometer.objectstore.rgw_client import RGWAdminClient RGW_ADMIN_BUCKETS = ''' [ { "max_marker": "", "ver": 2001, "usage": { "rgw.main": { "size_kb_actual": 16000, "num_objects": 1000, "size_kb": 1000 } }, "bucket": "somefoo", "owner": "admin", "master_ver": 0, "mtime": 1420176126, "marker": "default.4126.1", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.1", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" }, { "max_marker": "", "ver": 3, "usage": { "rgw.main": { "size_kb_actual": 43, "num_objects": 1, "size_kb": 42 } }, "bucket": "somefoo31", "owner": "admin", "master_ver": 0, "mtime": 1420176134, "marker": "default.4126.5", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.5", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" } ]''' RGW_ADMIN_USAGE = ''' { "entries": [ { "owner": "5f7fe2d5352e466f948f49341e33d107", "buckets": [ { "bucket": "", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}]}, { "bucket": "foodsgh", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}]}]}], "summary": [ { "user": "5f7fe2d5352e466f948f49341e33d107", "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "put_obj", "bytes_sent": 0, 
"bytes_received": 238, "ops": 1, "successful_ops": 1}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}], "total": { "bytes_sent": 46, "bytes_received": 238, "ops": 7, "successful_ops": 6}}]} ''' buckets_json = json.loads(RGW_ADMIN_BUCKETS) usage_json = json.loads(RGW_ADMIN_USAGE) class TestRGWAdminClient(base.BaseTestCase): def setUp(self): super(TestRGWAdminClient, self).setUp() self.client = RGWAdminClient('http://127.0.0.1:8080/admin', 'abcde', 'secret') self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() def test_make_request_exception(self): self.get_resp.status_code = 403 self.assertRaises(RGWAdminAPIFailed, self.client._make_request, *('foo', {})) def test_make_request(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client._make_request('foo', []) self.assertEqual(buckets_json, actual) def test_get_buckets(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client.get_bucket('foo') bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), RGWAdminClient.Bucket('somefoo31', 1, 42), ] expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) def test_get_usage(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = usage_json actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) ceilometer-6.1.5/ceilometer/tests/unit/objectstore/test_swift.py0000664000567000056710000002253213072744706026373 0ustar jenkinsjenkins00000000000000# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from keystoneauth1 import exceptions import mock from oslotest import base from oslotest import mockpatch from swiftclient import client as swift_client import testscenarios.testcase from ceilometer.agent import manager from ceilometer.objectstore import swift HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, 'x-account-bytes-used': 321321321, 'x-account-container-count': 7, }), ('tenant-001', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, }), ('tenant-002-ignored', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, })] GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, 'x-account-bytes-used': 123123, 'x-account-container-count': 2, }, [{'count': 10, 'bytes': 123123, 'name': 'my_container'}, {'count': 0, 'bytes': 0, 'name': 'new_container' }])), ('tenant-001', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, [])), ('tenant-002-ignored', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, []))] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.MagicMock() self._keystone_last_exception = None self._service_catalog = (self._keystone.session.auth. get_access.return_value.service_catalog) self._auth_token = (self._keystone.session.auth. get_access.return_value.auth_token) class TestSwiftPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. scenarios = [ ('storage.objects', {'factory': swift.ObjectsPollster}), ('storage.objects.size', {'factory': swift.ObjectsSizePollster}), ('storage.objects.containers', {'factory': swift.ObjectsContainersPollster}), ('storage.containers.objects', {'factory': swift.ContainersObjectsPollster}), ('storage.containers.objects.size', {'factory': swift.ContainersSizePollster}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestSwiftPollster, self).setUp() self.pollster = self.factory() self.manager = TestManager() if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': self.ACCOUNTS = HEAD_ACCOUNTS else: self.ACCOUNTS = GET_ACCOUNTS def tearDown(self): super(TestSwiftPollster, self).tearDown() swift._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with mockpatch.PatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call swiftclient. 
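# (Editorial note: each pollster caches the per-tenant account data under
# its CACHE_KEY_METHOD key, e.g. 'swift.head_account', so a pre-populated
# cache entry must short-circuit any call into swiftclient; the failing
# mock below turns an unexpected client call into a test failure.)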
mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = '%s_account' % self.pollster.METHOD with mockpatch.PatchObject(swift_client, api_method, new=mock_method): with mockpatch.PatchObject(self.factory, '_neaten_url'): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_neaten_url(self): test_endpoints = ['http://127.0.0.1:8080', 'http://127.0.0.1:8080/swift'] test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' for test_endpoint in test_endpoints: standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id url = swift._Base._neaten_url(test_endpoint, test_tenant_id) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/v1', test_tenant_id) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(standard_url, test_tenant_id) self.assertEqual(standard_url, url) def test_metering(self): with mockpatch.PatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) def test_get_meter_names(self): with mockpatch.PatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'end://point/' api_method = '%s_account' % self.pollster.METHOD with mockpatch.PatchObject(swift_client, api_method, new=mock_method): with mockpatch.PatchObject( self.manager._service_catalog, 'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(self.pollster._neaten_url(endpoint, t.id), self.manager._auth_token) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): endpoint = 'end://point/' mock_url_for = mock.MagicMock(return_value=endpoint) api_method = '%s_account' % self.pollster.METHOD with mockpatch.PatchObject(swift_client, api_method, new=mock.MagicMock()): with mockpatch.PatchObject( self.manager._service_catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, mock_url_for.call_count) def test_endpoint_notfound(self): with mockpatch.PatchObject( self.manager._service_catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ceilometer-6.1.5/ceilometer/tests/unit/objectstore/test_rgw.py0000664000567000056710000001634313072744706026041 0ustar jenkinsjenkins00000000000000# Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from keystoneauth1 import exceptions import mock from oslotest import base from oslotest import mockpatch import testscenarios.testcase from ceilometer.agent import manager from ceilometer.objectstore import rgw from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)] bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)] bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)] GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list1}), ('tenant-001', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list2}), ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list3})] GET_USAGE = [('tenant-000', 10), ('tenant-001', 11), ('tenant-002-ignored', 12)] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() self._catalog = (self._keystone.session.auth.get_access. return_value.service_catalog) self._catalog.url_for.return_value = 'http://foobar/endpoint' class TestRgwPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. scenarios = [ ('radosgw.objects', {'factory': rgw.ObjectsPollster}), ('radosgw.objects.size', {'factory': rgw.ObjectsSizePollster}), ('radosgw.objects.containers', {'factory': rgw.ObjectsContainersPollster}), ('radosgw.containers.objects', {'factory': rgw.ContainersObjectsPollster}), ('radosgw.containers.objects.size', {'factory': rgw.ContainersSizePollster}), ('radosgw.api.request', {'factory': rgw.UsagePollster}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestRgwPollster, self).setUp() self.pollster = self.factory() self.manager = TestManager() if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket': self.ACCOUNTS = GET_BUCKETS else: self.ACCOUNTS = GET_USAGE def tearDown(self): super(TestRgwPollster, self).tearDown() rgw._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with mockpatch.PatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call rgw_client.
mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = 'get_%s' % self.pollster.METHOD with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_metering(self): with mockpatch.PatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) def test_get_meter_names(self): with mockpatch.PatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'http://127.0.0.1:8000/admin' api_method = 'get_%s' % self.pollster.METHOD with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): with mockpatch.PatchObject( self.manager._catalog, 'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(t.id) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): mock_url_for = mock.MagicMock() mock_url_for.return_value = '/endpoint' api_method = 'get_%s' % self.pollster.METHOD with mockpatch.PatchObject(rgw_client, api_method, new=mock.MagicMock()): with mockpatch.PatchObject( self.manager._catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, mock_url_for.call_count) def test_endpoint_notfound(self): with mockpatch.PatchObject( self.manager._catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ceilometer-6.1.5/ceilometer/tests/unit/objectstore/__init__.py0000664000567000056710000000000013072744703025716 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/test_utils.py0000664000567000056710000001510013072744706024045 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
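# Editorial sketch: the tests below flatten nested metadata with
# utils.recursive_keypairs and rebuild it with utils.restore_nesting. A
# minimal sketch of the flattening side for plain nested dicts (the real
# helper also treats lists specially; this simplified generator is only an
# illustration):
def _example_recursive_keypairs(data, separator=':', prefix=''):
    # Walk the dict depth-first, joining nested keys with the separator.
    for name, value in sorted(data.items()):
        key = prefix + separator + name if prefix else name
        if isinstance(value, dict):
            for pair in _example_recursive_keypairs(value, separator, key):
                yield pair
        else:
            yield (key, value)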
"""Tests for ceilometer/utils.py """ import datetime import decimal from oslotest import base from ceilometer import utils class TestUtils(base.BaseTestCase): def test_datetime_to_decimal(self): expected = 1356093296.12 utc_datetime = datetime.datetime.utcfromtimestamp(expected) actual = utils.dt_to_decimal(utc_datetime) self.assertAlmostEqual(expected, float(actual), places=5) def test_decimal_to_datetime(self): expected = 1356093296.12 dexpected = decimal.Decimal(str(expected)) # Python 2.6 wants str() expected_datetime = datetime.datetime.utcfromtimestamp(expected) actual_datetime = utils.decimal_to_dt(dexpected) # Python 3 have rounding issue on this, so use float self.assertAlmostEqual(utils.dt_to_decimal(expected_datetime), utils.dt_to_decimal(actual_datetime), places=5) def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } separator = '.' pairs = list(utils.recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) def test_restore_nesting_unested(self): metadata = {'a': 'A', 'b': 'B'} unwound = utils.restore_nesting(metadata) self.assertIs(metadata, unwound) def test_restore_nesting(self): metadata = {'a': 'A', 'b': 'B', 'nested:a': 'A', 'nested:b': 'B', 'nested:twice:c': 'C', 'nested:twice:d': 'D', 'embedded:e': 'E'} unwound = utils.restore_nesting(metadata) expected = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'twice': {'c': 'C', 'd': 'D'}}, 'embedded': {'e': 'E'}} self.assertEqual(expected, unwound) self.assertIsNot(metadata, unwound) def test_restore_nesting_with_separator(self): metadata = {'a': 'A', 'b': 'B', 'nested.a': 'A', 'nested.b': 'B', 'nested.twice.c': 'C', 'nested.twice.d': 'D', 'embedded.e': 'E'} unwound = utils.restore_nesting(metadata, separator='.') expected = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'twice': {'c': 'C', 'd': 'D'}}, 'embedded': {'e': 'E'}} self.assertEqual(expected, unwound) self.assertIsNot(metadata, unwound) def test_decimal_to_dt_with_none_parameter(self): self.assertIsNone(utils.decimal_to_dt(None)) def test_dict_to_kv(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, 'nested2': [{'c': 'A'}, {'c': 'B'}] } pairs = list(utils.dict_to_keyval(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B'), ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) def test_hash_of_set(self): x = ['a', 'b'] y = ['a', 'b', 'a'] z = ['a', 'c'] self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y)) self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z)) self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z)) def test_hash_ring(self): num_nodes = 10 num_keys = 1000 nodes = [str(x) for 
x in range(num_nodes)] hr = utils.HashRing(nodes) buckets = [0] * num_nodes assignments = [-1] * num_keys for k in range(num_keys): n = int(hr.get_node(str(k))) self.assertTrue(0 <= n <= num_nodes) buckets[n] += 1 assignments[k] = n # at least something in each bucket self.assertTrue(all((c > 0 for c in buckets))) # approximately even distribution diff = max(buckets) - min(buckets) self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) # consistency num_nodes += 1 nodes.append(str(num_nodes + 1)) hr = utils.HashRing(nodes) for k in range(num_keys): n = int(hr.get_node(str(k))) assignments[k] -= n reassigned = len([c for c in assignments if c != 0]) self.assertTrue(reassigned < num_keys / num_nodes) ceilometer-6.1.5/ceilometer/tests/unit/api/0000775000567000056710000000000013072745164022047 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/api/test_app.py0000664000567000056710000000455313072744706024250 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_log import log from ceilometer.api import app from ceilometer.tests import base class TestApp(base.BaseTestCase): def setUp(self): super(TestApp, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf log.register_options(cfg.CONF) def test_api_paste_file_not_exist(self): self.CONF.set_override('api_paste_config', 'non-existent-file') with mock.patch.object(self.CONF, 'find_file') as ff: ff.return_value = None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) @mock.patch('ceilometer.storage.get_connection_from_config', mock.MagicMock()) @mock.patch('pecan.make_app') def test_pecan_debug(self, mocked): def _check_pecan_debug(g_debug, p_debug, expected, workers=1): self.CONF.set_override('debug', g_debug) if p_debug is not None: self.CONF.set_override('pecan_debug', p_debug, group='api') self.CONF.set_override('workers', workers, group='api') app.setup_app() args, kwargs = mocked.call_args self.assertEqual(expected, kwargs.get('debug')) _check_pecan_debug(g_debug=False, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=False, expected=False) _check_pecan_debug(g_debug=False, p_debug=True, expected=True) _check_pecan_debug(g_debug=True, p_debug=None, expected=False, workers=5) _check_pecan_debug(g_debug=False, p_debug=True, expected=False, workers=5) ceilometer-6.1.5/ceilometer/tests/unit/api/test_hooks.py0000664000567000056710000000250113072744706024602 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as fixture_config import oslo_messaging from ceilometer.api import hooks from ceilometer.tests import base class TestTestNotifierHook(base.BaseTestCase): def setUp(self): super(TestTestNotifierHook, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_init_notifier_with_drivers(self): self.CONF.set_override('telemetry_driver', 'messagingv2', group='publisher_notifier') hook = hooks.NotifierHook() notifier = hook.notifier self.assertIsInstance(notifier, oslo_messaging.Notifier) self.assertEqual(['messagingv2'], notifier._driver_names) ceilometer-6.1.5/ceilometer/tests/unit/api/test_versions.py0000664000567000056710000000310113072744703025321 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from ceilometer.tests.functional import api V2_MEDIA_TYPES = [ { 'base': 'application/json', 'type': 'application/vnd.openstack.telemetry-v2+json' }, { 'base': 'application/xml', 'type': 'application/vnd.openstack.telemetry-v2+xml' } ] V2_HTML_DESCRIPTION = { 'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', } V2_EXPECTED_RESPONSE = { 'id': 'v2', 'links': [ { 'rel': 'self', 'href': 'http://localhost/v2', }, V2_HTML_DESCRIPTION ], 'media-types': V2_MEDIA_TYPES, 'status': 'stable', 'updated': '2013-02-13T00:00:00Z', } V2_VERSION_RESPONSE = { "version": V2_EXPECTED_RESPONSE } VERSIONS_RESPONSE = { "versions": { "values": [ V2_EXPECTED_RESPONSE ] } } class TestVersions(api.FunctionalTest): def test_versions(self): data = self.get_json('/') self.assertEqual(VERSIONS_RESPONSE, data) ceilometer-6.1.5/ceilometer/tests/unit/api/v2/0000775000567000056710000000000013072745164022376 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py0000664000567000056710000000214413072744706027417 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
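# --- Illustrative sketch added for clarity; not part of the upstream
# suite.  AdvEnum (exercised below) is, in essence, a wsme attribute whose
# datatype is an Enum plus a default value.  A rough plain-wsme analogue
# under that assumption -- Dummy and _adv_enum_sketch are hypothetical
# names introduced only for this sketch:
import wsme
import wsme.types as wtypes


def _adv_enum_sketch():
    class Dummy(wtypes.Base):
        ae = wtypes.wsattr(wtypes.Enum(str, 'one', 'other'),
                           default='other')

    assert Dummy().ae == 'other'       # the default applies when unset
    assert Dummy(ae='one').ae == 'one'
    try:
        Dummy(ae='not exists')         # rejected at attribute-set time
    except wsme.exc.InvalidInput:
        pass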
from oslotest import base import wsme from ceilometer.api.controllers.v2 import base as v2_base class TestWsmeCustomType(base.BaseTestCase): def test_advenum_default(self): class dummybase(wsme.types.Base): ae = v2_base.AdvEnum("name", str, "one", "other", default="other") obj = dummybase() self.assertEqual("other", obj.ae) obj = dummybase(ae="one") self.assertEqual("one", obj.ae) self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") ceilometer-6.1.5/ceilometer/tests/unit/api/v2/test_statistics.py0000664000567000056710000001022013072744703026172 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test statistics objects.""" import datetime from oslotest import base from ceilometer.api.controllers.v2 import meters class TestStatisticsDuration(base.BaseTestCase): def setUp(self): super(TestStatisticsDuration, self).setUp() # Create events relative to the range and pretend # that the intervening events exist. self.early1 = datetime.datetime(2012, 8, 27, 7, 0) self.early2 = datetime.datetime(2012, 8, 27, 17, 0) self.start = datetime.datetime(2012, 8, 28, 0, 0) self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) self.end = datetime.datetime(2012, 8, 28, 23, 59) self.late1 = datetime.datetime(2012, 8, 29, 9, 0) self.late2 = datetime.datetime(2012, 8, 29, 19, 0) def test_nulls(self): s = meters.Statistics(duration_start=None, duration_end=None, start_timestamp=None, end_timestamp=None) self.assertIsNone(s.duration_start) self.assertIsNone(s.duration_end) self.assertIsNone(s.duration) def test_overlap_range_start(self): s = meters.Statistics(duration_start=self.early1, duration_end=self.middle1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.start, s.duration_start) self.assertEqual(self.middle1, s.duration_end) self.assertEqual(8 * 60 * 60, s.duration) def test_within_range(self): s = meters.Statistics(duration_start=self.middle1, duration_end=self.middle2, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle1, s.duration_start) self.assertEqual(self.middle2, s.duration_end) self.assertEqual(10 * 60 * 60, s.duration) def test_within_range_zero_duration(self): s = meters.Statistics(duration_start=self.middle1, duration_end=self.middle1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle1, s.duration_start) self.assertEqual(self.middle1, s.duration_end) self.assertEqual(0, s.duration) def test_overlap_range_end(self): s = meters.Statistics(duration_start=self.middle2, duration_end=self.late1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle2, s.duration_start) self.assertEqual(self.end, s.duration_end) self.assertEqual(((6 * 60) - 1) * 60, s.duration) def test_after_range(self): s = meters.Statistics(duration_start=self.late1, duration_end=self.late2, start_timestamp=self.start, end_timestamp=self.end) self.assertIsNone(s.duration_start) 
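        # A window lying wholly after [start_timestamp, end_timestamp]
        # contributes no duration data at all, hence the None checks.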
self.assertIsNone(s.duration_end) self.assertIsNone(s.duration) def test_without_timestamp(self): s = meters.Statistics(duration_start=self.late1, duration_end=self.late2, start_timestamp=None, end_timestamp=None) self.assertEqual(self.late1, s.duration_start) self.assertEqual(self.late2, s.duration_end) ceilometer-6.1.5/ceilometer/tests/unit/api/v2/test_complex_query.py0000664000567000056710000003573313072744706026717 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to complex query.""" import datetime import fixtures import jsonschema import mock from oslotest import base import wsme from ceilometer.api.controllers.v2 import query from ceilometer.storage import models class FakeComplexQuery(query.ValidatedComplexQuery): def __init__(self, db_model, additional_name_mapping=None, metadata=False): super(FakeComplexQuery, self).__init__(query=None, db_model=db_model, additional_name_mapping=( additional_name_mapping or {}), metadata_allowed=metadata) sample_name_mapping = {"resource": "resource_id", "meter": "counter_name", "type": "counter_type", "unit": "counter_unit", "volume": "counter_volume"} class TestComplexQuery(base.BaseTestCase): def setUp(self): super(TestComplexQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.query = FakeComplexQuery(models.Sample, sample_name_mapping, True) def test_replace_isotime_utc(self): filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_timezone_removed(self): filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_wrong_syntax(self): filter_expr = {"=": {"timestamp": "not a valid isotime string"}} self.assertRaises(wsme.exc.ClientSideError, self.query._replace_isotime_with_datetime, filter_expr) def test_replace_isotime_in_complex_filter(self): filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["and"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["and"][1]["="]["timestamp"]) def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): subfilter = {"and": [{"=": {"project_id": 42}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, subfilter]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["or"][0]["="]["timestamp"]) 
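        # The conversion must also reach the timestamp nested one level
        # deeper on the right-hand branch of the unbalanced tree.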
self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["or"][1]["and"][1]["="]["timestamp"]) def test_convert_operator_to_lower_case(self): filter_expr = {"AND": [{"=": {"project_id": 42}}, {"=": {"project_id": 44}}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("and", list(filter_expr.keys())[0]) filter_expr = {"Or": [{"=": {"project_id": 43}}, {"anD": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("or", list(filter_expr.keys())[0]) self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) def test_invalid_filter_misstyped_field_name_samples(self): filter = {"=": {"project_id11": 42}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_invalid_complex_filter_wrong_field_names(self): filter = {"and": [{"=": {"non_existing_field": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"or": [{"=": {"non_existing_field": 42}}, {"and": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_convert_orderby(self): orderby = [] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([], orderby) orderby = [{"project_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "desc"}], orderby) orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], orderby) def test_validate_orderby_empty_direction(self): orderby = [{"project_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"resource_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_order_string(self): orderby = [{"project_id": "not a valid order"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_multiple_item_order_string(self): orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_empty_field_name(self): orderby = [{"": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"": "desc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name(self): orderby = [{"project_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_metadata_is_not_allowed(self): orderby = [{"metadata.display_name": "asc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) class TestFilterSyntaxValidation(base.BaseTestCase): def setUp(self): super(TestFilterSyntaxValidation, self).setUp() self.query = FakeComplexQuery(models.Sample, sample_name_mapping, True) def test_simple_operator(self): filter = {"=": {"project_id": "string_value"}} 
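        # A single {operator: {field: value}} mapping is the smallest
        # filter the validation schema accepts.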
self.query._validate_filter(filter) filter = {"=>": {"project_id": "string_value"}} self.query._validate_filter(filter) def test_valid_value_types(self): filter = {"=": {"project_id": "string_value"}} self.query._validate_filter(filter) filter = {"=": {"project_id": 42}} self.query._validate_filter(filter) filter = {"=": {"project_id": 3.14}} self.query._validate_filter(filter) filter = {"=": {"project_id": True}} self.query._validate_filter(filter) filter = {"=": {"project_id": False}} self.query._validate_filter(filter) def test_invalid_simple_operator(self): filter = {"==": {"project_id": "string_value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"": {"project_id": "string_value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_more_than_one_operator_is_invalid(self): filter = {"=": {"project_id": "string_value"}, "<": {"": ""}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_empty_expression_is_invalid(self): filter = {} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_invalid_field_name(self): filter = {"=": {"": "value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"=": {" ": "value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"=": {"\t": "value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_more_than_one_field_is_invalid(self): filter = {"=": {"project_id": "value", "resource_id": "value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_missing_field_after_simple_op_is_invalid(self): filter = {"=": {}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_and_or(self): filter = {"and": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}]} self.query._validate_filter(filter) filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}]}, {"=": {"counter_name": "value"}}]} self.query._validate_filter(filter) filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}, {"<": {"counter_name": 42}}]}, {"=": {"counter_name": "value"}}]} self.query._validate_filter(filter) def test_complex_operator_with_in(self): filter = {"and": [{"<": {"counter_volume": 42}}, {">=": {"counter_volume": 36}}, {"in": {"project_id": ["project_id1", "project_id2", "project_id3"]}}]} self.query._validate_filter(filter) def test_invalid_complex_operator(self): filter = {"xor": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_and_or_with_one_child_is_invalid(self): filter = {"or": [{"=": {"project_id": "string_value"}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_complex_operator_with_zero_child_is_invalid(self): filter = {"or": []} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_more_than_one_complex_operator_is_invalid(self): filter = {"and": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}], "or": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_not(self): filter = 
{"not": {"=": {"project_id": "value"}}} self.query._validate_filter(filter) filter = { "not": {"or": [{"and": [{"=": {"project_id": "string_value"}}, {"=": {"resource_id": "value"}}, {"<": {"counter_name": 42}}]}, {"=": {"counter_name": "value"}}]}} self.query._validate_filter(filter) def test_not_with_zero_child_is_invalid(self): filter = {"not": {}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_not_with_more_than_one_child_is_invalid(self): filter = {"not": {"=": {"project_id": "value"}, "!=": {"resource_id": "value"}}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_empty_in_query_not_passing(self): filter = {"in": {"resource_id": []}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) ceilometer-6.1.5/ceilometer/tests/unit/api/v2/__init__.py0000664000567000056710000000000013072744703024473 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/api/v2/test_query.py0000664000567000056710000004155513072744706025167 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to query.""" import datetime import fixtures import mock from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import wsme from ceilometer.api.controllers.v2 import base as v2_base from ceilometer.api.controllers.v2 import events from ceilometer.api.controllers.v2 import meters from ceilometer.api.controllers.v2 import utils from ceilometer import storage from ceilometer.storage import base as storage_base from ceilometer.tests import base as tests_base class TestQuery(base.BaseTestCase): def setUp(self): super(TestQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.useFixture(mockpatch.Patch('ceilometer.api.controllers.v2.events' '._build_rbac_query_filters', return_value={'t_filter': [], 'admin_proj': None})) def test_get_value_as_type_with_integer(self): query = v2_base.Query(field='metadata.size', op='eq', value='123', type='integer') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='float') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True', type='boolean') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux', type='string') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_datetime(self): query = v2_base.Query(field='metadata.date', op='eq', value='2014-01-01T05:00:00', type='datetime') 
self.assertIsInstance(query._get_value_as_type(), datetime.datetime) self.assertIsNone(query._get_value_as_type().tzinfo) def test_get_value_as_type_with_integer_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean_without_type(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string_without_type(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_bad_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='blob') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_with_bad_value(self): query = v2_base.Query(field='metadata.size', op='eq', value='fake', type='integer') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_integer_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='WWW-Layer-4a80714f') expected = 'WWW-Layer-4a80714f' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_boolean_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='True or False') expected = 'True or False' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error(self): # bug 1221736 value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' query = v2_base.Query(field='group_id', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error_colons(self): # bug 1221736 value = 'Ref::StackId' query = v2_base.Query(field='field_name', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_event_query_to_event_filter_with_bad_op(self): # bug 1511592 query = v2_base.Query(field='event_type', op='ne', value='compute.instance.create.end', type='string') self.assertRaises(v2_base.ClientSideError, events._event_query_to_event_filter, [query]) class TestValidateGroupByFields(base.BaseTestCase): def test_valid_field(self): result = meters._validate_groupby_fields(['user_id']) self.assertEqual(['user_id'], result) def test_valid_fields_multiple(self): result = set(meters._validate_groupby_fields( ['user_id', 'project_id', 'source'])) self.assertEqual(set(['user_id', 'project_id', 'source']), result) def test_invalid_field(self): self.assertRaises(wsme.exc.UnknownArgument, meters._validate_groupby_fields, ['wtf']) def test_invalid_field_multiple(self): self.assertRaises(wsme.exc.UnknownArgument, meters._validate_groupby_fields, ['user_id', 'wtf', 'project_id', 'source']) def test_duplicate_fields(self): result = set( meters._validate_groupby_fields(['user_id', 'source', 'user_id']) ) self.assertEqual(set(['user_id', 'source']), result) class TestQueryToKwArgs(tests_base.BaseTestCase): def setUp(self): super(TestQueryToKwArgs, self).setUp() self.useFixture(mockpatch.PatchObject( utils, 
'sanitize_query', side_effect=lambda x, y, **z: x)) self.useFixture(mockpatch.PatchObject( utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) def test_sample_filter_single(self): q = [v2_base.Query(field='user_id', op='eq', value='uid')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertIn('user', kwargs) self.assertEqual(1, len(kwargs)) self.assertEqual('uid', kwargs['user']) def test_sample_filter_multi(self): q = [v2_base.Query(field='user_id', op='eq', value='uid'), v2_base.Query(field='project_id', op='eq', value='pid'), v2_base.Query(field='resource_id', op='eq', value='rid'), v2_base.Query(field='source', op='eq', value='source_name'), v2_base.Query(field='meter', op='eq', value='meter_name')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(5, len(kwargs)) self.assertEqual('uid', kwargs['user']) self.assertEqual('pid', kwargs['project']) self.assertEqual('rid', kwargs['resource']) self.assertEqual('source_name', kwargs['source']) self.assertEqual('meter_name', kwargs['meter']) def test_sample_filter_timestamp(self): ts_start = timeutils.utcnow() ts_end = ts_start + datetime.timedelta(minutes=5) q = [v2_base.Query(field='timestamp', op='lt', value=str(ts_end)), v2_base.Query(field='timestamp', op='gt', value=str(ts_start))] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(4, len(kwargs)) self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) self.assertEqual('gt', kwargs['start_timestamp_op']) self.assertEqual('lt', kwargs['end_timestamp_op']) def test_sample_filter_meta(self): q = [v2_base.Query(field='metadata.size', op='eq', value='20'), v2_base.Query(field='resource_metadata.id', op='eq', value='meta_id')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(1, len(kwargs)) self.assertEqual(2, len(kwargs['metaquery'])) self.assertEqual(20, kwargs['metaquery']['metadata.size']) self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) def test_sample_filter_non_equality_on_metadata(self): queries = [v2_base.Query(field='resource_metadata.image_id', op='gt', value='image', type='string'), v2_base.Query(field='metadata.ramdisk_id', op='le', value='ramdisk', type='string')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, queries, storage.SampleFilter.__init__) def test_sample_filter_invalid_field(self): q = [v2_base.Query(field='invalid', op='eq', value='20')] self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_invalid_op(self): q = [v2_base.Query(field='user_id', op='lt', value='20')] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_timestamp_invalid_op(self): ts_start = timeutils.utcnow() q = [v2_base.Query(field='timestamp', op='eq', value=str(ts_start))] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_exclude_internal(self): queries = [v2_base.Query(field=f, op='eq', value='fake', type='string') for f in ['y', 'on_behalf_of', 'x']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises(wsme.exc.ClientSideError, utils.query_to_kwargs, queries, storage.SampleFilter.__init__, 
internal_keys=['on_behalf_of']) def test_sample_filter_self_always_excluded(self): queries = [v2_base.Query(field='user_id', op='eq', value='20')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) self.assertNotIn('self', kwargs) def test_sample_filter_translation(self): queries = [v2_base.Query(field=f, op='eq', value='fake_%s' % f, type='string') for f in ['user_id', 'project_id', 'resource_id']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) for o in ['user', 'project', 'resource']: self.assertEqual('fake_%s_id' % o, kwargs.get(o)) def test_timestamp_validation(self): q = [v2_base.Query(field='timestamp', op='le', value='123')] exc = self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) expected_exc = wsme.exc.InvalidInput('timestamp', '123', 'invalid timestamp format') self.assertEqual(str(expected_exc), str(exc)) def test_sample_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) valid_keys = ['message_id', 'meter', 'project', 'resource', 'search_offset', 'source', 'timestamp', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) def test_get_meters_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage_base.Connection.get_meters, ['limit', 'unique']) valid_keys = ['project', 'resource', 'source', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) def test_get_resources_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage_base.Connection.get_resources, ['limit']) valid_keys = ['project', 'resource', 'search_offset', 'source', 'timestamp', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) ceilometer-6.1.5/ceilometer/tests/unit/api/__init__.py0000664000567000056710000000000013072744703024144 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/transformer/0000775000567000056710000000000013072745164023640 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/transformer/test_conversions.py0000664000567000056710000001077113072744706027630 0ustar jenkinsjenkins00000000000000# # Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_context import context from oslo_utils import timeutils from oslotest import base from ceilometer import sample from ceilometer.transformer import conversions class AggregatorTransformerTestCase(base.BaseTestCase): SAMPLE = sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp="2015-10-29 14:12:15.485877+00:00", resource_metadata={} ) def setUp(self): super(AggregatorTransformerTestCase, self).setUp() self._sample_offset = 0 def test_init_input_validation(self): aggregator = conversions.AggregatorTransformer("2", "15", None, None, None) self.assertEqual(2, aggregator.size) self.assertEqual(15, aggregator.retention_time) def test_init_no_size_or_rention_time(self): aggregator = conversions.AggregatorTransformer() self.assertEqual(1, aggregator.size) self.assertIsNone(aggregator.retention_time) def test_init_size_zero(self): aggregator = conversions.AggregatorTransformer(size="0") self.assertEqual(1, aggregator.size) self.assertIsNone(aggregator.retention_time) def test_init_input_validation_size_invalid(self): self.assertRaises(ValueError, conversions.AggregatorTransformer, "abc", "15", None, None, None) def test_init_input_validation_retention_time_invalid(self): self.assertRaises(ValueError, conversions.AggregatorTransformer, "2", "abc", None, None, None) def test_init_no_timestamp(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None) self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_none(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, None) self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_first(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "first") self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_last(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "last") self.assertEqual("last", aggregator.timestamp) def test_init_timestamp_invalid(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "invalid_option") self.assertEqual("first", aggregator.timestamp) def test_size_unbounded(self): aggregator = conversions.AggregatorTransformer(size="0", retention_time="300") self._insert_sample_data(aggregator) samples = aggregator.flush(context.get_admin_context()) self.assertEqual([], samples) def test_size_bounded(self): aggregator = conversions.AggregatorTransformer(size="100") self._insert_sample_data(aggregator) samples = aggregator.flush(context.get_admin_context()) self.assertEqual(100, len(samples)) def _insert_sample_data(self, aggregator): for _ in range(100): sample = copy.copy(self.SAMPLE) sample.resource_id = sample.resource_id + str(self._sample_offset) sample.timestamp = timeutils.isotime() aggregator.handle_sample(context.get_admin_context(), sample) self._sample_offset += 1 ceilometer-6.1.5/ceilometer/tests/unit/transformer/__init__.py0000664000567000056710000000000013072744703025735 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/test_declarative.py0000664000567000056710000000315313072744703025172 0ustar jenkinsjenkins00000000000000# # Copyright 2016 Mirantis, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import mockpatch from ceilometer import declarative from ceilometer.tests import base class TestDefinition(base.BaseTestCase): def setUp(self): super(TestDefinition, self).setUp() self.configs = [ "_field1", "_field2|_field3", {'fields': 'field4.`split(., 1, 1)`'}, {'fields': ['field5.arg', 'field6'], 'type': 'text'} ] self.parser = mock.MagicMock() parser_patch = mockpatch.Patch( "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse", new=self.parser) self.useFixture(parser_patch) def test_caching_parsers(self): for config in self.configs * 2: declarative.Definition("test", config, mock.MagicMock()) self.assertEqual(4, self.parser.call_count) self.parser.assert_has_calls([ mock.call("_field1"), mock.call("_field2|_field3"), mock.call("field4.`split(., 1, 1)`"), mock.call("(field5.arg)|(field6)"), ]) ceilometer-6.1.5/ceilometer/tests/unit/test_messaging.py0000664000567000056710000000473013072744706024671 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
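# --- Illustrative sketch added for clarity; not part of the upstream
# suite.  The caching semantics verified below amount to a keyed memo
# table: one transport object is reused per URL unless cache=False.  A
# minimal stand-in using only the standard library (all names here are
# hypothetical):

_TRANSPORT_CACHE = {}


def _get_transport_sketch(url=None, cache=True):
    """Return a per-URL singleton object unless caching is disabled."""
    if not cache:
        return object()  # always a fresh, unshared transport
    if url not in _TRANSPORT_CACHE:
        _TRANSPORT_CACHE[url] = object()
    return _TRANSPORT_CACHE[url]

# _get_transport_sketch('fake://') is _get_transport_sketch('fake://')
# holds, while cache=False yields a distinct object on every call --
# mirroring the assertEqual/assertNotEqual pairs in MessagingTests below.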
from oslo_config import fixture as fixture_config import oslo_messaging.conffixture from oslotest import base from ceilometer import messaging class MessagingTests(base.BaseTestCase): def setUp(self): super(MessagingTests, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) def test_get_transport_invalid_url(self): self.assertRaises(oslo_messaging.InvalidTransportURL, messaging.get_transport, "notvalid!") def test_get_transport_url_caching(self): t1 = messaging.get_transport('fake://') t2 = messaging.get_transport('fake://') self.assertEqual(t1, t2) def test_get_transport_default_url_caching(self): t1 = messaging.get_transport() t2 = messaging.get_transport() self.assertEqual(t1, t2) def test_get_transport_default_url_no_caching(self): t1 = messaging.get_transport(cache=False) t2 = messaging.get_transport(cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_no_caching(self): t1 = messaging.get_transport('fake://', cache=False) t2 = messaging.get_transport('fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_default_url_caching_mix(self): t1 = messaging.get_transport() t2 = messaging.get_transport(cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_caching_mix(self): t1 = messaging.get_transport('fake://') t2 = messaging.get_transport('fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_optional(self): self.CONF.set_override('rpc_backend', '') self.assertIsNone(messaging.get_transport(optional=True, cache=False)) ceilometer-6.1.5/ceilometer/tests/unit/test_decoupled_pipeline.py0000664000567000056710000002725313072744706026552 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
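# --- Illustrative sketch added for clarity; not part of the upstream
# suite.  The "decoupled" format splits a pipeline into named sources
# (what to sample, how often) and named sinks (how to transform and
# publish), joined through each source's 'sinks' list.  The dict built in
# _setup_pipeline_cfg below corresponds to this YAML (_DECOUPLED_CFG is a
# hypothetical name; the structure itself mirrors the test):
import yaml

_DECOUPLED_CFG = yaml.safe_load("""
sources:
  - name: test_source
    interval: 5
    counters: ['a']
    resources: []
    sinks: [test_sink]
sinks:
  - name: test_sink
    transformers:
      - name: update
        parameters: {}
    publishers: ['test://']
""")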
import yaml from ceilometer import pipeline from ceilometer import sample from ceilometer.tests import pipeline_base class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): def _setup_pipeline_cfg(self): source = {'name': 'test_source', 'interval': 5, 'counters': ['a'], 'resources': [], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'transformers': [{'name': 'update', 'parameters': {}}], 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) def _break_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def test_source_no_sink(self): del self.pipeline_cfg['sinks'] self._exception_create_pipelinemanager() def test_source_no_meters_or_counters(self): del self.pipeline_cfg['sources'][0]['counters'] self._exception_create_pipelinemanager() def test_source_dangling_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self._exception_create_pipelinemanager() def test_sink_no_source(self): del self.pipeline_cfg['sources'] self._exception_create_pipelinemanager() def test_source_with_multiple_sinks(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('counters', counter_cfg) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) 
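        # Each (source, sink) pair becomes one pipeline, named
        # '<source>:<sink>' as the str() checks here and below show.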
self.assertEqual('test_source:second_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher, sfx in [(test_publisher, '_update'), (new_publisher, '_new')]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) def test_multiple_sources_with_single_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['test_sink'] }) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('second_source:test_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] another_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher in [test_publisher, another_publisher]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) transformed_samples = self.TransformerClass.samples self.assertEqual(2, len(transformed_samples)) self.assertEqual(['a', 'b'], [getattr(s, 'name') for s in transformed_samples]) def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, meters, units): with open('etc/ceilometer/pipeline.yaml') as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) for s in pipeline_cfg['sinks']: s['publishers'] = ['test://'] pipeline_manager = pipeline.PipelineManager(pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[index] self._do_test_rate_of_change_mapping(pipe, meters, units) def test_rate_of_change_boilerplate_disk_read_cfg(self): meters = ('disk.read.bytes', 'disk.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_disk_write_cfg(self): meters = ('disk.write.bytes', 'disk.write.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_network_incoming_cfg(self): meters = ('network.incoming.bytes', 'network.incoming.packets') units = ('B', 'packet') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, meters, units) def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): meters = ('disk.device.read.bytes', 'disk.device.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): meters = ('disk.device.write.bytes', 'disk.device.write.requests') units = ('B', 'request') 
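        # Index 3 selects the disk sink in the shipped pipeline.yaml
        # (index 4, used by the network tests, is the network sink).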
self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_network_outgoing_cfg(self): meters = ('network.outgoing.bytes', 'network.outgoing.packets') units = ('B', 'packet') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, meters, units) def test_duplicated_sinks_names(self): self.pipeline_cfg['sinks'].append({ 'name': 'test_sink', 'publishers': ['except'], }) self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager) def test_duplicated_source_names(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'interval': 5, 'counters': ['a'], 'resources': [], 'sinks': ['test_sink'] }) self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager) ceilometer-6.1.5/ceilometer/tests/unit/energy/0000775000567000056710000000000013072745164022567 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/energy/test_kwapi.py0000664000567000056710000001066713072744706025326 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from keystoneauth1 import exceptions import mock from oslo_context import context from oslotest import base from oslotest import mockpatch import six from ceilometer.agent import manager from ceilometer.energy import kwapi PROBE_DICT = { "probes": { "A": { "timestamp": 1357730232.68754, "w": 107.3, "kwh": 0.001058255421506034 }, "B": { "timestamp": 1357730232.048158, "w": 15.0, "kwh": 0.029019045026169896 }, "C": { "timestamp": 1357730232.223375, "w": 95.0, "kwh": 0.17361822634312918 } } } ENDPOINT = 'end://point' class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() class _BaseTestCase(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestCase, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() class TestKwapi(_BaseTestCase): @staticmethod def fake_get_kwapi_client(ksclient, endpoint): raise exceptions.EndpointNotFound("fake keystone exception") def test_endpoint_not_exist(self): with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', side_effect=self.fake_get_kwapi_client): pollster = kwapi.EnergyPollster() samples = list(pollster.get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(0, len(samples)) class TestEnergyPollster(_BaseTestCase): pollster_cls = kwapi.EnergyPollster unit = 'kwh' def setUp(self): super(TestEnergyPollster, self).setUp() self.useFixture(mockpatch.PatchObject( kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) @staticmethod def fake_iter_probes(ksclient, cache, endpoint): probes = PROBE_DICT['probes'] for key, value in six.iteritems(probes): probe_dict = value probe_dict['id'] = key yield probe_dict def test_default_discovery(self): pollster = kwapi.EnergyPollster() self.assertEqual('endpoint:energy', 
pollster.default_discovery) def test_sample(self): cache = {} samples = list(self.pollster_cls().get_samples(self.manager, cache, [ENDPOINT])) self.assertEqual(len(PROBE_DICT['probes']), len(samples)) samples_by_name = dict((s.resource_id, s) for s in samples) for name, probe in PROBE_DICT['probes'].items(): sample = samples_by_name[name] expected = datetime.datetime.fromtimestamp( probe['timestamp'] ).isoformat() self.assertEqual(expected, sample.timestamp) self.assertEqual(probe[self.unit], sample.volume) class TestPowerPollster(TestEnergyPollster): pollster_cls = kwapi.PowerPollster unit = 'w' class TestEnergyPollsterCache(_BaseTestCase): pollster_cls = kwapi.EnergyPollster def test_get_samples_cached(self): probe = {'id': 'A'} probe.update(PROBE_DICT['probes']['A']) cache = { '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], } self.manager._keystone = mock.Mock() pollster = self.pollster_cls() with mock.patch.object(pollster, '_get_probes') as do_not_call: do_not_call.side_effect = AssertionError('should not be called') samples = list(pollster.get_samples(self.manager, cache, [ENDPOINT])) self.assertEqual(1, len(samples)) class TestPowerPollsterCache(TestEnergyPollsterCache): pollster_cls = kwapi.PowerPollster ceilometer-6.1.5/ceilometer/tests/unit/energy/__init__.py0000664000567000056710000000000013072744706024667 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/0000775000567000056710000000000013072745164022234 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/pollsters/0000775000567000056710000000000013072745164024263 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/pollsters/base.py0000664000567000056710000000500613072744706025551 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import mockpatch import six from ceilometer.agent import manager from ceilometer.tests import base @six.add_metaclass(abc.ABCMeta) class TestPollsterBase(base.BaseTestCase): def fake_data(self): """Fake data used for test.""" return None def fake_sensor_data(self, sensor_type): """Fake sensor data used for test.""" return None @abc.abstractmethod def make_pollster(self): """Produce right pollster for test.""" def _test_get_samples(self): nm = mock.Mock() nm.read_inlet_temperature.side_effect = self.fake_data nm.read_outlet_temperature.side_effect = self.fake_data nm.read_power_all.side_effect = self.fake_data nm.read_airflow.side_effect = self.fake_data nm.read_cups_index.side_effect = self.fake_data nm.read_cups_utilization.side_effect = self.fake_data nm.read_sensor_any.side_effect = self.fake_sensor_data # We should mock the pollster first before initialize the Manager # so that we don't trigger the sudo in pollsters' __init__(). 
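        # (The real NodeManager/IPMISensor helpers shell out to ipmitool,
        # typically via sudo, which the fixtures below avoid entirely.)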
self.useFixture(mockpatch.Patch( 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', return_value=nm)) self.useFixture(mockpatch.Patch( 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', return_value=nm)) self.mgr = manager.AgentManager(['ipmi']) self.pollster = self.make_pollster() def _verify_metering(self, length, expected_vol=None, node=None): cache = {} resources = ['local_host'] samples = list(self.pollster.get_samples(self.mgr, cache, resources)) self.assertEqual(length, len(samples)) if expected_vol: self.assertTrue(any(s.volume == expected_vol for s in samples)) if node: self.assertTrue(any(s.resource_metadata['node'] == node for s in samples)) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py0000664000567000056710000001007513072744706027211 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from ceilometer.ipmi.pollsters import sensor from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data from ceilometer.tests.unit.ipmi.pollsters import base CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') TEMPERATURE_SENSOR_DATA = { 'Temperature': ipmi_test_data.TEMPERATURE_DATA } CURRENT_SENSOR_DATA = { 'Current': ipmi_test_data.CURRENT_DATA } FAN_SENSOR_DATA = { 'Fan': ipmi_test_data.FAN_DATA } VOLTAGE_SENSOR_DATA = { 'Voltage': ipmi_test_data.VOLTAGE_DATA } MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] class TestTemperatureSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return TEMPERATURE_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(10, float(32), CONF.host) class TestMissingSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMalformedSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MALFORMED_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMissingSensorId(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_ID_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class 
TestFanSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return FAN_SENSOR_DATA def make_pollster(self): return sensor.FanSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(12, float(7140), CONF.host) class TestCurrentSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return CURRENT_SENSOR_DATA def make_pollster(self): return sensor.CurrentSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, float(130), CONF.host) class TestVoltageSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return VOLTAGE_SENSOR_DATA def make_pollster(self): return sensor.VoltageSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(4, float(3.309), CONF.host) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/pollsters/test_node.py0000664000567000056710000001144413072744706026626 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from ceilometer.ipmi.pollsters import node from ceilometer.tests.unit.ipmi.pollsters import base CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') class TestPowerPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['13', '00']} def make_pollster(self): return node.PowerPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 19(0x13 as current_value) self._verify_metering(1, 19, CONF.host) class TestInletTemperaturePollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['23', '00']} def make_pollster(self): return node.InletTemperaturePollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 35(0x23 as current_value) self._verify_metering(1, 35, CONF.host) class TestOutletTemperaturePollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['25', '00']} def make_pollster(self): return node.OutletTemperaturePollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 37(0x25 as current_value) self._verify_metering(1, 37, CONF.host) class TestAirflowPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['be', '00']} def make_pollster(self): return node.AirflowPollster() 
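# The fake_data() payloads in these node tests are little-endian lists of
# hex byte strings, the shape produced by parsing Intel Node Manager raw
# output. A worked example of the decoding the expected volumes rely on
# (a sketch of what node_manager._hex is assumed to do):
#
#   >>> int(''.join(reversed(['be', '00'])), 16)   # '00be'
#   190
#   >>> int(''.join(reversed(['13', '00'])), 16)   # '0013'
#   19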
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 190(0xbe as current_value) self._verify_metering(1, 190, CONF.host) class TestCUPSIndexPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"CUPS_Index": ['2e', '00']} def make_pollster(self): return node.CUPSIndexPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 46(0x2e as cups_index) self._verify_metering(1, 46, CONF.host) class CPUUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"CPU_Utilization": ['33', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.CPUUtilPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 51(0x33 as cpu_utilization) self._verify_metering(1, 51, CONF.host) class MemUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Mem_Utilization": ['05', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.MemUtilPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 5(0x05) self._verify_metering(1, 5, CONF.host) class IOUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"IO_Utilization": ['00', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.IOUtilPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 0(0x00) self._verify_metering(1, 0, CONF.host) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/pollsters/__init__.py0000664000567000056710000000000013072744703026360 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/0000775000567000056710000000000013072745164024060 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/fake_utils.py0000664000567000056710000000747213072744703026570 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
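# This module stubs out ceilometer.utils.execute. The _execute() dispatcher
# below joins the ipmitool argument tuple into a single lookup key, e.g.
# ('ipmitool', 'raw', '0x06', '0x01') -> 'ipmitoolraw0x060x01', and returns
# the matching canned (stdout, stderr) pair from ipmitool_test_data;
# commands not in the data table fall through to the per-platform fakes.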
import binascii from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data def get_sensor_status_init(parameter=''): return (' 01\n', '') def get_sensor_status_uninit(parameter=''): return (' 00\n', '') def init_sensor_agent(parameter=''): return (' 00\n', '') def get_nm_version_v2(parameter=''): return test_data.nm_version_v2 def get_nm_version_v3(parameter=''): return test_data.nm_version_v3 def sdr_dump(data_file=''): if data_file == '': raise ValueError("No file specified for ipmitool sdr dump") fake_slave_address = '2c' fake_channel = '60' hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel data = binascii.unhexlify(hexstr) with open(data_file, 'wb') as bin_fp: bin_fp.write(data) return ('', '') def _execute(funcs, *cmd, **kwargs): datas = { test_data.device_id_cmd: test_data.device_id, test_data.nm_device_id_cmd: test_data.nm_device_id, test_data.get_power_cmd: test_data.power_data, test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, test_data.get_airflow_cmd: test_data.airflow_data, test_data.get_cups_index_cmd: test_data.cups_index_data, test_data.get_cups_util_cmd: test_data.cups_util_data, test_data.sdr_info_cmd: test_data.sdr_info, test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, test_data.read_sensor_current_cmd: test_data.sensor_current, test_data.read_sensor_fan_cmd: test_data.sensor_fan, } if cmd[1] == 'sdr' and cmd[2] == 'dump': # ipmitool sdr dump /tmp/XXXX cmd_str = "".join(cmd[:3]) par_str = cmd[3] else: cmd_str = "".join(cmd) par_str = '' try: return datas[cmd_str] except KeyError: return funcs[cmd_str](par_str) def execute_with_nm_v3(*cmd, **kwargs): """test version of execute on Node Manager V3.0 platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_init, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump, test_data.nm_version_cmd: get_nm_version_v3} return _execute(funcs, *cmd, **kwargs) def execute_with_nm_v2(*cmd, **kwargs): """test version of execute on Node Manager V2.0 platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_init, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump, test_data.nm_version_cmd: get_nm_version_v2} return _execute(funcs, *cmd, **kwargs) def execute_without_nm(*cmd, **kwargs): """test version of execute on Non-Node Manager platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump} return _execute(funcs, *cmd, **kwargs) def execute_without_ipmi(*cmd, **kwargs): raise nmexcept.IPMIException ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py0000664000567000056710000003223313072744703030317 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_intel_node_manager and test_ipmi_sensor. This data is provided as a sample of the data expected from the ipmitool binary, which produce Node Manager/IPMI raw data """ sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Temperature Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB P1 VR Temp (0x20) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 25 (+/- 0) degrees C Status : ok Nominal Reading : 58.000 Normal Minimum : 10.000 Normal Maximum : 105.000 Upper critical : 115.000 Upper non-critical : 110.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : Front Panel Temp (0x21) Entity ID : 12.1 (Front Panel Board) Sensor Type (Analog) : Temperature Sensor Reading : 23 (+/- 0) degrees C Status : ok Nominal Reading : 28.000 Normal Minimum : 10.000 Normal Maximum : 45.000 Upper critical : 55.000 Upper non-critical : 50.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : SSB Temp (0x22) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 43 (+/- 0) degrees C Status : ok Nominal Reading : 52.000 Normal Minimum : 10.000 Normal Maximum : 93.000 Upper critical : 103.000 Upper non-critical : 98.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Voltage Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB +12.0V (0xd0) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 11.831 (+/- 0) Volts Status : ok Nominal Reading : 11.935 Normal Minimum : 11.363 Normal Maximum : 12.559 Upper critical : 13.391 Upper non-critical : 13.027 Lower critical : 10.635 Lower non-critical : 10.947 Positive Hysteresis : 0.052 Negative Hysteresis : 0.052 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions 
Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +1.35 P1LV AB (0xe4) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : Disabled Status : Disabled Nominal Reading : 1.342 Normal Minimum : 1.275 Normal Maximum : 1.409 Upper critical : 1.488 Upper non-critical : 1.445 Lower critical : 1.201 Lower non-critical : 1.244 Positive Hysteresis : 0.006 Negative Hysteresis : 0.006 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Event Status : Unavailable Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +5.0V (0xd1) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 4.959 (+/- 0) Volts Status : ok Nominal Reading : 4.981 Normal Minimum : 4.742 Normal Maximum : 5.241 Upper critical : 5.566 Upper non-critical : 5.415 Lower critical : 4.416 Lower non-critical : 4.546 Positive Hysteresis : 0.022 Negative Hysteresis : 0.022 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) Entity ID : 10.1 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 11 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ Sensor ID : PS2 Curr Out % (0x59) Entity ID : 10.2 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 0 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ """ sensor_fan_data = """Sensor ID : System Fan 1 (0x30) Entity ID : 29.1 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 2 (0x32) Entity ID : 29.2 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower 
critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 3 (0x34) Entity ID : 29.3 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 4 (0x36) Entity ID : 29.4 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4606 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- """ sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' sdr_dump_cmd = 'ipmitoolsdrdump' sdr_info_cmd = 'ipmitoolsdrinfo' read_sensor_all_cmd = 'ipmitoolsdr-v' read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' device_id_cmd = 'ipmitoolraw0x060x01' nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') # start from byte 3, get cur- 57 00(87), min- 03 00(3) # max- 37 02(567), avg- 5c 00(92) power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' ' 9b 12 01 50\n', '') # start from byte 3, get cur- 17 00(23), min- 16 00(22) # max- 18 00(24), avg- 17 00(23) inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- 19 00(25), min- 18 00(24) # max- 1b 00(27), avg- 19 00(25) outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- be 00(190), min- 96 00(150) # max- 26 02(550), avg- cb 00(203) airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 
65 c1 54 db\n' ' b7 02 00 50\n', '') # start from byte 3, cups index 2e 00 (46) cups_index_data = (' 57 01 00 2e 00\n', '') # start from byte 3, get cpu_util - 33 00 ...(51), mem_util - 05 00 ...(5) # io_util - 00 00 ...(0) cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' ' 00 00 00 00 00 00 00 00 00 00 00\n', '') sdr_info = ('', '') sensor_temperature = (sensor_temperature_data, '') sensor_voltage = (sensor_voltage_data, '') sensor_current = (sensor_current_data, '') sensor_fan = (sensor_fan_data, '') ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py0000664000567000056710000001075113072744703030022 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from ceilometer.ipmi.platform import ipmi_sensor from ceilometer.tests.unit.ipmi.platform import fake_utils from ceilometer import utils class TestIPMISensor(base.BaseTestCase): def setUp(self): super(TestIPMISensor, self).setUp() utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super(TestIPMISensor, cls).tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertTrue(self.ipmi.ipmi_support) # only temperature data returned. self.assertIn('Temperature', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. # Check ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py self.assertEqual(3, len(sensors['Temperature'])) sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # only voltage data returned. self.assertIn('Voltage', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. # Check ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py self.assertEqual(3, len(sensors['Voltage'])) sensor = sensors['Voltage']['BB +5.0V (0xd1)'] self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # only Current data returned. self.assertIn('Current', sensors) self.assertEqual(1, len(sensors)) # 2 sensor data in total. # Check ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py self.assertEqual(2, len(sensors['Current'])) sensor = sensors['Current']['PS1 Curr Out % (0x58)'] self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # only Fan data returned. self.assertIn('Fan', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total.
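# read_sensor_any() groups the verbose ipmitool output into one dict per
# sensor, keyed by each block's 'Sensor ID' value (e.g.
# 'System Fan 2 (0x32)'), so these length checks count the sensor blocks
# present in the fixture text.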
# Check ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py self.assertEqual(4, len(sensors['Fan'])) sensor = sensors['Fan']['System Fan 2 (0x32)'] self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) class TestNonIPMISensor(base.BaseTestCase): def setUp(self): super(TestNonIPMISensor, self).setUp() utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super(TestNonIPMISensor, cls).tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertFalse(self.ipmi.ipmi_support) # Non-IPMI platform returns empty data self.assertEqual({}, sensors) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # Non-IPMI platform returns empty data self.assertEqual({}, sensors) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # Non-IPMI platform returns empty data self.assertEqual({}, sensors) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # Non-IPMI platform returns empty data self.assertEqual({}, sensors) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py0000664000567000056710000001315413072744706031310 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
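# NodeManager (like IPMISensor above) behaves as a process-wide singleton
# guarded by an _inited flag, which is why each tearDownClass resets the
# flag before the next test class runs. A minimal sketch of that pattern
# (an assumption for illustration, not the production implementation):
#
#   class _Singleton(object):
#       _instance = None
#       _inited = False
#
#       def __new__(cls):
#           if cls._instance is None:
#               cls._instance = super(_Singleton, cls).__new__(cls)
#           return cls._instance
#
#       def __init__(self):
#           if self._inited:
#               return          # skip re-probing on every instantiation
#           self._inited = True  # first construction probes the hardware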
import abc import mock from oslotest import base import six from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer.tests.unit.ipmi.platform import fake_utils from ceilometer import utils @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractmethod def init_test_engine(self): """Prepare specific ipmitool as engine for different NM version.""" def setUp(self): super(_Base, self).setUp() self.init_test_engine() self.nm = node_manager.NodeManager() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test node_manager.NodeManager()._inited = False super(_Base, cls).tearDownClass() class TestNodeManagerV3(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) def test_read_airflow(self): airflow = self.nm.read_airflow() avg_val = node_manager._hex(airflow["Average_value"]) max_val = node_manager._hex(airflow["Maximum_value"]) min_val = node_manager._hex(airflow["Minimum_value"]) cur_val = node_manager._hex(airflow["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(190, cur_val) self.assertEqual(150, min_val) self.assertEqual(550, max_val) self.assertEqual(203, avg_val) def test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() avg_val = node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(25, cur_val) self.assertEqual(24, min_val) self.assertEqual(27, max_val) self.assertEqual(25, avg_val) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) mem_util = node_manager._hex(cups_util["Mem_Utilization"]) io_util = node_manager._hex(cups_util["IO_Utilization"]) # see ipmi_test_data.py for raw data self.assertEqual(51, cpu_util) self.assertEqual(5, mem_util) self.assertEqual(0, io_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() index = node_manager._hex(cups_index["CUPS_Index"]) self.assertEqual(46, index) class TestNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) def test_read_power_all(self): power = self.nm.read_power_all() avg_val = node_manager._hex(power["Average_value"]) max_val = node_manager._hex(power["Maximum_value"]) min_val = node_manager._hex(power["Minimum_value"]) cur_val = node_manager._hex(power["Current_value"]) # get NM 2.0 self.assertEqual(3, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(87, cur_val) self.assertEqual(3, min_val) self.assertEqual(567, max_val) self.assertEqual(92, avg_val) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() avg_val = node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # see ipmi_test_data.py for raw data self.assertEqual(23, cur_val) self.assertEqual(22, min_val) self.assertEqual(24, max_val) self.assertEqual(23, avg_val) def test_read_airflow(self): airflow = self.nm.read_airflow() self.assertEqual({}, airflow) def 
test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() self.assertEqual({}, temperature) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() self.assertEqual({}, cups_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() self.assertEqual({}, cups_index) class TestNonNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) def test_read_power_all(self): # no NM support self.assertEqual(0, self.nm.nm_version) power = self.nm.read_power_all() # Non-Node Manager platform returns empty data self.assertEqual({}, power) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() # Non-Node Manager platform returns empty data self.assertEqual({}, temperature) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/platform/__init__.py0000664000567000056710000000000013072744703026155 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/notifications/0000775000567000056710000000000013072745164025105 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/notifications/test_ironic.py0000664000567000056710000002046713072744703030010 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for producing IPMI sample messages from notification events. """ import mock from oslotest import base from ceilometer.ipmi.notifications import ironic as ipmi from ceilometer import sample from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data class TestNotifications(base.BaseTestCase): def test_ipmi_temperature_notification(self): """Test IPMI Temperature sensor data. Based on the imported ipmi_test_data, the expected sample for a single temperature reading has:: * a resource_id composed from the node_uuid and Sensor ID * a name composed from 'hardware.ipmi.' and 'temperature' * a volume from the first chunk of the Sensor Reading * a unit from the last chunk of the Sensor Reading * some readings are skipped if the value is 'Disabled' * metadata with the node id """ processor = ipmi.TemperatureSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' ) test_counter = counters[resource_id] self.assertEqual(26.0, test_counter.volume) self.assertEqual('C', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.temperature', test_counter.name) self.assertEqual('hardware.ipmi.metrics.update', test_counter.resource_metadata['event_type']) self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', test_counter.resource_metadata['node']) def test_ipmi_current_notification(self): """Test IPMI Current sensor data.
A single current reading is effectively the same as temperature, modulo "current". """ processor = ipmi.CurrentSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(1, len(counters), 'expected 1 current reading') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)' ) test_counter = counters[resource_id] self.assertEqual(130.0, test_counter.volume) self.assertEqual('W', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.current', test_counter.name) def test_ipmi_fan_notification(self): """Test IPMI Fan sensor data. A single fan reading is effectively the same as temperature, modulo "fan". """ processor = ipmi.FanSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(12, len(counters), 'expected 12 fan readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' ) test_counter = counters[resource_id] self.assertEqual(6900.0, test_counter.volume) self.assertEqual('RPM', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.fan', test_counter.name) def test_ipmi_voltage_notification(self): """Test IPMI Voltage sensor data. A single voltage reading is effectively the same as temperature, modulo "voltage". """ processor = ipmi.VoltageSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(4, len(counters), 'expected 4 voltage readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' ) test_counter = counters[resource_id] self.assertEqual(3.137, test_counter.volume) self.assertEqual('V', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.voltage', test_counter.name) def test_disabled_skips_metric(self): """Test that a meter whose volume is disabled is skipped.""" processor = ipmi.TemperatureSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' ) self.assertNotIn(resource_id, counters) def test_empty_payload_no_metrics_success(self): processor = ipmi.TemperatureSensorNotification(None) counters = dict([(counter.resource_id, counter) for counter in processor.process_notification( ipmi_test_data.EMPTY_PAYLOAD)]) self.assertEqual(0, len(counters), 'expected 0 readings') @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_data(self, mylog): processor = ipmi.TemperatureSensorNotification(None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.process_notification(ipmi_test_data.MISSING_SENSOR)) self.assertEqual( 'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' "missing 'Sensor Reading' in payload", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_sensor_data_malformed(self, mylog): processor = ipmi.TemperatureSensorNotification(None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.process_notification(ipmi_test_data.BAD_SENSOR)) self.assertEqual(
'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' 'unable to parse sensor reading: some bad stuff', messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_node_uuid(self, mylog): """Test for desired error message when 'node_uuid' missing. Presumably this will never happen given the way the data is created, but better defensive than dead. """ processor = ipmi.TemperatureSensorNotification(None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.process_notification(ipmi_test_data.NO_NODE_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'node_uuid'", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_id(self, mylog): """Test for desired error message when 'Sensor ID' missing.""" processor = ipmi.TemperatureSensorNotification(None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'Sensor ID'", messages[0] ) ceilometer-6.1.5/ceilometer/tests/unit/ipmi/notifications/__init__.py0000664000567000056710000000000013072744703027202 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py0000664000567000056710000007274513072744703030462 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_ipmi. This data is provided as a sample of the data expected from the ipmitool driver in the Ironic project, which is the publisher of the notifications being tested. 
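Each fixture below follows the Ironic notification envelope: a dict with
'message_id', 'publisher_id' and a 'payload' whose nested 'payload' maps a
sensor type ('Temperature', 'Current', 'Fan', 'Voltage') to sensor-ID keyed
readings. Readings whose Sensor Reading is 'Disabled' are skipped by the
notification processors, and the tests derive each resource_id from the
node_uuid plus a lower-cased, underscored Sensor ID, for example
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)'.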
""" TEMPERATURE_DATA = { 'DIMM GH VR Temp (0x3b)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.6 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM GH VR Temp (0x3b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU1 VR Temp (0x36)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '32 (+/- 0.500) degrees C', 'Entity ID': '20.1 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU1 VR Temp (0x36)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM EF VR Temp (0x3a)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.5 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM EF VR Temp (0x3a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU2 VR Temp (0x37)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '31 (+/- 0.500) degrees C', 'Entity ID': '20.2 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU2 VR Temp (0x37)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'Ambient Temp (0x32)': { 'Status': 'ok', 'Sensor Reading': '25 (+/- 0) degrees C', 'Entity ID': '12.1 (Front Panel Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Upper non-critical': '43.000', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Upper non-recoverable': '50.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative 
Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '46.000', 'Sensor ID': 'Ambient Temp (0x32)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'Mezz Card Temp (0x35)': { 'Status': 'Disabled', 'Sensor Reading': 'Disabled', 'Entity ID': '44.1 (I/O Module)', 'Event Message Control': 'Per-threshold', 'Upper non-critical': '70.000', 'Upper non-recoverable': '85.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'Mezz Card Temp (0x35)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'PCH Temp (0x3c)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '46 (+/- 0.500) degrees C', 'Entity ID': '45.1 (Processor/IO Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '93.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '103.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '98.000', 'Sensor ID': 'PCH Temp (0x3c)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM CD VR Temp (0x39)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '27 (+/- 0.500) degrees C', 'Entity ID': '20.4 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM CD VR Temp (0x39)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 2 Temp (0x34)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '30 (+/- 0) degrees C', 'Entity ID': '16.2 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 2 Temp (0x34)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM AB VR Temp (0x38)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '28 (+/- 0.500) degrees C', 'Entity ID': '20.3 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 
'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM AB VR Temp (0x38)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '38 (+/- 0) degrees C', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } CURRENT_DATA = { 'Avg Power (0x2e)': { 'Status': 'ok', 'Sensor Reading': '130 (+/- 0) Watts', 'Entity ID': '21.0 (Power Management)', 'Assertions Enabled': '', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'No Thresholds', 'Positive Hysteresis': 'Unspecified', 'Sensor Type (Analog)': 'Current', 'Negative Hysteresis': 'Unspecified', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Avg Power (0x2e)', 'Assertion Events': '', 'Minimum sensor range': '2550.000', 'Settable Thresholds': 'No Thresholds' } } FAN_DATA = { 'Fan 4A Tach (0x46)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4A Tach (0x46)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 5A Tach (0x48)': { 'Status': 'ok', 'Sensor Reading': '7140 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5A Tach (0x48)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3A Tach (0x44)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3A Tach (0x44)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 1A Tach (0x40)': { 'Status': 'ok', 'Sensor Reading': '6960 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1A Tach (0x40)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3B Tach (0x45)': { 'Status': 'ok', 'Sensor Reading': '7104 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3B Tach (0x45)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2A Tach (0x42)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2A Tach (0x42)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 4B Tach (0x47)': { 'Status': 'ok', 'Sensor Reading': '7488 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4B Tach (0x47)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2B Tach (0x43)': { 'Status': 'ok', 'Sensor Reading': '7168 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2B Tach (0x43)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 5B Tach (0x49)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal 
Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5B Tach (0x49)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 1B Tach (0x41)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1B Tach (0x41)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6B Tach (0x4b)': { 'Status': 'ok', 'Sensor Reading': '7616 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6B Tach (0x4b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6A Tach (0x4a)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6A Tach (0x4a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' } } VOLTAGE_DATA = { 'Planar 12V (0x18)': { 'Status': 'ok', 'Sensor Reading': '12.312 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.108', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '10.692', 'Negative Hysteresis': '0.108', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '13.446', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 12V (0x18)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '12.042' }, 'Planar 3.3V (0x16)': { 'Status': 'ok', 'Sensor Reading': '3.309 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.028', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': 
'3.039', 'Negative Hysteresis': '0.028', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '3.564', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 3.3V (0x16)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.309' }, 'Planar VBAT (0x1c)': { 'Status': 'ok', 'Sensor Reading': '3.137 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lnc- lcr-', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Readable Thresholds': 'lcr lnc', 'Positive Hysteresis': '0.025', 'Deassertions Enabled': 'lnc- lcr-', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '2.095', 'Negative Hysteresis': '0.025', 'Lower non-critical': '2.248', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Planar VBAT (0x1c)', 'Settable Thresholds': 'lcr lnc', 'Threshold Read Mask': 'lcr lnc', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.010' }, 'Planar 5V (0x17)': { 'Status': 'ok', 'Sensor Reading': '5.062 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.045', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '4.475', 'Negative Hysteresis': '0.045', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '5.582', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 5V (0x17)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4.995' } } SENSOR_DATA = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': TEMPERATURE_DATA, 'Current': CURRENT_DATA, 'Fan': FAN_DATA, 'Voltage': VOLTAGE_DATA } } } EMPTY_PAYLOAD = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { } } } MISSING_SENSOR = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } BAD_SENSOR = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 
'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': 'some bad stuff', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } NO_SENSOR_ID = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', }, } } } } NO_NODE_ID = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', }, } } } } ceilometer-6.1.5/ceilometer/tests/unit/ipmi/__init__.py0000664000567000056710000000000013072744703024331 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/agent/0000775000567000056710000000000013072745164022374 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/agent/test_manager.py0000664000567000056710000004530413072744706025426 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
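# The TestManager cases below exercise AgentManager plugin loading: # 'namespaces' picks the stevedore entry-point namespaces to load pollsters # from, and 'pollster_list' further filters the loaded extensions by name # using wildcard patterns. A minimal sketch, reusing only names that appear # in the tests themselves: # mgr = manager.AgentManager(pollster_list=['disk.*']) # [ext.name for ext in mgr.extensions] # -> disk-related pollster names only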
"""Tests for ceilometer agent manager""" import shutil from keystoneclient import exceptions as ks_exceptions import mock from novaclient import client as novaclient from oslo_config import fixture as fixture_config from oslo_service import service as os_service from oslo_utils import fileutils from oslotest import base from oslotest import mockpatch import requests import six from stevedore import extension import yaml from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.hardware import discovery from ceilometer import pipeline from ceilometer.tests.unit.agent import agentbase class PollingException(Exception): pass class TestPollsterBuilder(agentbase.TestPollster): @classmethod def build_pollsters(cls): return [('builder1', cls()), ('builder2', cls())] class TestManager(base.BaseTestCase): def setUp(self): super(TestManager, self).setUp() self.conf = self.useFixture(fixture_config.Config()).conf self.conf(args=[]) @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) def test_load_plugins(self): mgr = manager.AgentManager() self.assertIsNotNone(list(mgr.extensions)) def test_load_plugins_pollster_list(self): mgr = manager.AgentManager(pollster_list=['disk.*']) # currently we do have 26 disk-related pollsters self.assertEqual(26, len(list(mgr.extensions))) def test_load_plugins_no_intersection(self): # Let's test nothing will be polled if namespace and pollsters # list have no intersection. mgr = manager.AgentManager(namespaces=['compute'], pollster_list=['storage.*']) self.assertEqual(0, len(list(mgr.extensions))) # Test plugin load behavior based on Node Manager pollsters. # pollster_list is just a filter, so sensor pollsters under 'ipmi' # namespace would be also instanced. Still need mock __init__ for it. @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_normal_plugins(self): mgr = manager.AgentManager(namespaces=['ipmi'], pollster_list=['hardware.ipmi.node.*']) # 8 pollsters for Node Manager self.assertEqual(8, len(mgr.extensions)) # Skip loading pollster upon ExtensionLoadError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=plugin_base.ExtensionLoadError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.agent.manager.LOG') def test_load_failed_plugins(self, LOG): # Here we additionally check that namespaces will be converted to the # list if param was not set as a list. 
mgr = manager.AgentManager(namespaces='ipmi', pollster_list=['hardware.ipmi.node.*']) # 0 pollsters self.assertEqual(0, len(mgr.extensions)) err_msg = 'Skip loading extension for hardware.ipmi.node.%s' pollster_names = [ 'power', 'temperature', 'outlet_temperature', 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] calls = [mock.call(err_msg % n) for n in pollster_names] LOG.exception.assert_has_calls(calls=calls, any_order=True) # Skip loading pollster upon ImportError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=ImportError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_import_error_in_plugin(self): mgr = manager.AgentManager(namespaces=['ipmi'], pollster_list=['hardware.ipmi.node.*']) # 0 pollsters self.assertEqual(0, len(mgr.extensions)) # Exceptions other than ExtensionLoadError are propagated @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=PollingException)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_exceptional_plugins(self): self.assertRaises(PollingException, manager.AgentManager, ['ipmi'], ['hardware.ipmi.node.*']) def test_load_plugins_pollster_list_forbidden(self): manager.cfg.CONF.set_override('backend_url', 'http://', group='coordination') self.assertRaises(manager.PollsterListForbidden, manager.AgentManager, pollster_list=['disk.*']) manager.cfg.CONF.reset() def test_builder(self): @staticmethod def fake_get_ext_mgr(namespace): if 'builder' in namespace: return extension.ExtensionManager.make_test_instance( [ extension.Extension('builder', None, TestPollsterBuilder, None), ] ) else: return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, agentbase.TestPollster()), ] ) with mock.patch.object(manager.AgentManager, '_get_ext_mgr', new=fake_get_ext_mgr): mgr = manager.AgentManager(namespaces=['central']) self.assertEqual(3, len(mgr.extensions)) for ext in mgr.extensions: self.assertIn(ext.name, ['builder1', 'builder2', 'test']) self.assertIsInstance(ext.obj, agentbase.TestPollster) class TestPollsterKeystone(agentbase.TestPollster): def get_samples(self, manager, cache, resources): # Just try to use keystone, that will raise an exception manager.keystone.projects.list() class TestPollsterPollingException(agentbase.TestPollster): polling_failures = 0 def get_samples(self, manager, cache, resources): func = super(TestPollsterPollingException, self).get_samples sample = func(manager=manager, cache=cache, resources=resources) # Raise polling exception after 2 times self.polling_failures += 1 if self.polling_failures > 2: raise plugin_base.PollsterPermanentError(resources) return sample class TestRunTasks(agentbase.BaseAgentManagerTestCase): class PollsterKeystone(TestPollsterKeystone): samples = [] resources = [] test_data = agentbase.TestSample( name='testkeystone', type=agentbase.default_test_data.type, unit=agentbase.default_test_data.unit, volume=agentbase.default_test_data.volume, user_id=agentbase.default_test_data.user_id, project_id=agentbase.default_test_data.project_id, resource_id=agentbase.default_test_data.resource_id, timestamp=agentbase.default_test_data.timestamp, resource_metadata=agentbase.default_test_data.resource_metadata) class PollsterPollingException(TestPollsterPollingException): samples = [] resources = [] test_data = agentbase.TestSample( name='testpollingexception', type=agentbase.default_test_data.type, 
unit=agentbase.default_test_data.unit, volume=agentbase.default_test_data.volume, user_id=agentbase.default_test_data.user_id, project_id=agentbase.default_test_data.project_id, resource_id=agentbase.default_test_data.resource_id, timestamp=agentbase.default_test_data.timestamp, resource_metadata=agentbase.default_test_data.resource_metadata) @staticmethod def create_manager(): return manager.AgentManager() @staticmethod def setup_pipeline_file(pipeline): if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def fake_notifier_sample(self, ctxt, event_type, payload): for m in payload['samples']: del m['message_signature'] self.notified_samples.append(m) def setUp(self): self.notified_samples = [] self.notifier = mock.Mock() self.notifier.sample.side_effect = self.fake_notifier_sample self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', return_value=self.notifier)) self.source_resources = True super(TestRunTasks, self).setUp() self.useFixture(mockpatch.Patch( 'keystoneclient.v2_0.client.Client', return_value=mock.Mock())) def tearDown(self): self.PollsterKeystone.samples = [] self.PollsterKeystone.resources = [] self.PollsterPollingException.samples = [] self.PollsterPollingException.resources = [] super(TestRunTasks, self).tearDown() def create_extension_list(self): exts = super(TestRunTasks, self).create_extension_list() exts.extend([extension.Extension('testkeystone', None, None, self.PollsterKeystone(), ), extension.Extension('testpollingexception', None, None, self.PollsterPollingException(), )]) return exts def test_get_sample_resources(self): polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertTrue(self.Pollster.resources) def test_when_keystone_fail(self): """Test for bug 1316532.""" self.useFixture(mockpatch.Patch( 'keystoneclient.v2_0.client.Client', side_effect=ks_exceptions.ClientException)) self.pipeline_cfg = { 'sources': [{ 'name': "test_keystone", 'interval': 10, 'meters': ['testkeystone'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertFalse(self.PollsterKeystone.samples) self.assertFalse(self.notified_samples) @mock.patch('ceilometer.agent.manager.LOG') @mock.patch('ceilometer.nova_client.LOG') def test_hardware_discover_fail_minimize_logs(self, novalog, baselog): self.useFixture(mockpatch.PatchObject( novaclient.HTTPClient, 'authenticate', side_effect=requests.ConnectionError)) class PollsterHardware(agentbase.TestPollster): discovery = 'tripleo_overcloud_nodes' class PollsterHardwareAnother(agentbase.TestPollster): discovery = 'tripleo_overcloud_nodes' self.mgr.extensions.extend([ extension.Extension('testhardware', None, None, PollsterHardware(), ), extension.Extension('testhardware2', None, None, PollsterHardwareAnother(), ) ]) ext = extension.Extension('tripleo_overcloud_nodes', None, None, discovery.NodesDiscoveryTripleO()) self.mgr.discovery_manager = (extension.ExtensionManager .make_test_instance([ext])) self.pipeline_cfg = { 'sources': [{ 'name': "test_hardware", 'interval': 10, 'meters': ['testhardware', 'testhardware2'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 
'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertEqual(1, novalog.exception.call_count) self.assertFalse(baselog.exception.called) @mock.patch('ceilometer.agent.manager.LOG') def test_polling_exception(self, LOG): source_name = 'test_pollingexception' self.pipeline_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] # 2 samples after 4 pollings, as pollster got disabled upon exception for x in range(0, 4): self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(2, len(samples)) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s for ' 'polling source %(source)s anymore!') % ({'name': pollster.name, 'source': source_name})) def test_batching_polled_samples_false(self): self.CONF.set_override('batch_polled_samples', False) self._batching_samples(4, 4) def test_batching_polled_samples_true(self): self.CONF.set_override('batch_polled_samples', True) self._batching_samples(4, 1) def test_batching_polled_samples_default(self): self._batching_samples(4, 1) def _batching_samples(self, expected_samples, call_count): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testbatch'], 'resources': ['alpha', 'beta', 'gamma', 'delta'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) pipeline_cfg_file = self.setup_pipeline_file(pipeline) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) self.mgr.start() # Manually executes callbacks for timer in self.mgr.pollster_timers: timer.f(*timer.args, **timer.kw) samples = self.notified_samples self.assertEqual(expected_samples, len(samples)) self.assertEqual(call_count, self.notifier.sample.call_count) def test_start_with_reloadable_pipeline(self): self.CONF.set_override('heartbeat', 1.0, group='coordination') self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 2) pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) pipeline_cfg_file = self.setup_pipeline_file(pipeline) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) self.mgr.start() # we only got the old name of meters for sample in self.notified_samples: self.assertEqual('test', sample['counter_name']) self.assertEqual(1, sample['counter_volume']) self.assertEqual('test_run_tasks', sample['resource_id']) # Modify the collection targets pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testanother'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) 
updated_pipeline_cfg_file = self.setup_pipeline_file(pipeline) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) # Flush notified samples to test only new, nothing latent on # fake message bus. self.notified_samples = [] # we only got the new name of meters for sample in self.notified_samples: self.assertEqual('testanother', sample['counter_name']) self.assertEqual(1, sample['counter_volume']) self.assertEqual('test_run_tasks', sample['resource_id']) ceilometer-6.1.5/ceilometer/tests/unit/agent/agentbase.py0000664000567000056710000007322413072744706024710 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 Intel corp. # Copyright 2013 eNovance # Copyright 2014 Red Hat, Inc # # Authors: Yunhong Jiang # Julien Danjou # Eoghan Glynn # Nejc Saje # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import datetime import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch import six from stevedore import extension from ceilometer.agent import plugin_base from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import sample from ceilometer.tests import base from ceilometer import utils class TestSample(sample.Sample): def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source=None): super(TestSample, self).__init__(name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source) def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other): return not self.__eq__(other) default_test_data = TestSample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'Pollster'}, ) class TestPollster(plugin_base.PollsterBase): test_data = default_test_data discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) c = copy.deepcopy(self.test_data) c.resource_metadata['resources'] = resources return [c] class BatchTestPollster(TestPollster): test_data = default_test_data discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) for resource in resources: c = copy.deepcopy(self.test_data) c.timestamp = datetime.datetime.utcnow().isoformat() c.resource_id = resource c.resource_metadata['resource'] = resource yield c class TestPollsterException(TestPollster): def get_samples(self, 
manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) raise Exception() class TestDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) return self.resources class TestDiscoveryException(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) raise Exception() @six.add_metaclass(abc.ABCMeta) class BaseAgentManagerTestCase(base.BaseTestCase): class Pollster(TestPollster): samples = [] resources = [] test_data = default_test_data class BatchPollster(BatchTestPollster): samples = [] resources = [] test_data = default_test_data class PollsterAnother(TestPollster): samples = [] resources = [] test_data = TestSample( name='testanother', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class PollsterException(TestPollsterException): samples = [] resources = [] test_data = TestSample( name='testexception', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class PollsterExceptionAnother(TestPollsterException): samples = [] resources = [] test_data = TestSample( name='testexceptionanother', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class Discovery(TestDiscovery): params = [] resources = [] class DiscoveryAnother(TestDiscovery): params = [] resources = [] @property def group_id(self): return 'another_group' class DiscoveryException(TestDiscoveryException): params = [] def setup_polling(self): self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) def create_extension_list(self): return [extension.Extension('test', None, None, self.Pollster(), ), extension.Extension('testbatch', None, None, self.BatchPollster(), ), extension.Extension('testanother', None, None, self.PollsterAnother(), ), extension.Extension('testexception', None, None, self.PollsterException(), ), extension.Extension('testexceptionanother', None, None, self.PollsterExceptionAnother(), )] def create_discovery_manager(self): return extension.ExtensionManager.make_test_instance( [ extension.Extension( 'testdiscovery', None, None, self.Discovery(), ), extension.Extension( 'testdiscoveryanother', None, None, self.DiscoveryAnother(), ), extension.Extension( 'testdiscoveryexception', None, None, self.DiscoveryException(), ), ], ) @abc.abstractmethod def create_manager(self): """Return subclass specific manager.""" @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) def setUp(self): super(BaseAgentManagerTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override( 'pipeline_cfg_file', self.path_get('etc/ceilometer/pipeline.yaml') ) self.CONF(args=[]) self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() 
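# Coordination is stubbed out here: the partition coordinator is a MagicMock # whose extract_my_subset side effect hands back whatever resource set it is # given, so partitioning never filters anything out in these tests.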
self.mgr.partition_coordinator = mock.MagicMock() fake_subset = lambda _, x: x p_coord = self.mgr.partition_coordinator p_coord.extract_my_subset.side_effect = fake_subset self.mgr.tg = mock.MagicMock() self.pipeline_cfg = { 'sources': [{ 'name': 'test_pipeline', 'interval': 60, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling() self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) @staticmethod def get_publisher(url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'rpc://': test_publisher.TestPublisher} return fake_drivers[url](url) def tearDown(self): self.Pollster.samples = [] self.Pollster.discovery = [] self.PollsterAnother.samples = [] self.PollsterAnother.discovery = [] self.PollsterException.samples = [] self.PollsterException.discovery = [] self.PollsterExceptionAnother.samples = [] self.PollsterExceptionAnother.discovery = [] self.Pollster.resources = [] self.PollsterAnother.resources = [] self.PollsterException.resources = [] self.PollsterExceptionAnother.resources = [] self.Discovery.params = [] self.DiscoveryAnother.params = [] self.DiscoveryException.params = [] self.Discovery.resources = [] self.DiscoveryAnother.resources = [] super(BaseAgentManagerTestCase, self).tearDown() @mock.patch('ceilometer.pipeline.setup_polling') def test_start(self, setup_polling): self.mgr.join_partitioning_groups = mock.MagicMock() self.mgr.setup_polling_tasks = mock.MagicMock() self.CONF.set_override('heartbeat', 1.0, group='coordination') self.mgr.start() setup_polling.assert_called_once_with() self.mgr.partition_coordinator.start.assert_called_once_with() self.mgr.join_partitioning_groups.assert_called_once_with() self.mgr.setup_polling_tasks.assert_called_once_with() timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) self.mgr.stop() self.mgr.partition_coordinator.stop.assert_called_once_with() @mock.patch('ceilometer.pipeline.setup_polling') def test_start_with_pipeline_poller(self, setup_polling): self.mgr.join_partitioning_groups = mock.MagicMock() self.mgr.setup_polling_tasks = mock.MagicMock() self.CONF.set_override('heartbeat', 1.0, group='coordination') self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 5) self.mgr.start() setup_polling.assert_called_once_with() self.mgr.partition_coordinator.start.assert_called_once_with() self.mgr.join_partitioning_groups.assert_called_once_with() self.mgr.setup_polling_tasks.assert_called_once_with() timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) self.assertEqual([timer_call, pipeline_poller_call], self.mgr.tg.add_timer.call_args_list) def test_join_partitioning_groups(self): self.mgr.discovery_manager = self.create_discovery_manager() self.mgr.join_partitioning_groups() p_coord = self.mgr.partition_coordinator static_group_ids = [utils.hash_of_set(p['resources']) for p in self.pipeline_cfg['sources'] if p['resources']] expected = [mock.call(self.mgr.construct_group_id(g)) for g in ['another_group', 'global'] + static_group_ids] self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) for c in expected: self.assertIn(c, 
p_coord.join_group.call_args_list) def test_setup_polling_tasks(self): polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) per_task_resources = polling_tasks[60].resources self.assertEqual(1, len(per_task_resources)) self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), set(per_task_resources['test_pipeline-test'].get({}))) def test_setup_polling_tasks_multiple_interval(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 10, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink'] }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(2, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertIn(10, polling_tasks.keys()) def test_setup_polling_tasks_mismatch_counter(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 10, 'meters': ['test_invalid'], 'resources': ['invalid://'], 'sinks': ['test_sink'] }) polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertNotIn(10, polling_tasks.keys()) def test_setup_polling_task_same_interval(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 60, 'meters': ['testanother'], 'resources': ['testanother://'] if self.source_resources else [], 'sinks': ['test_sink'] }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) pollsters = polling_tasks.get(60).pollster_matches self.assertEqual(2, len(pollsters)) per_task_resources = polling_tasks[60].resources self.assertEqual(2, len(per_task_resources)) key = 'test_pipeline-test' self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), set(per_task_resources[key].get({}))) key = 'test_pipeline_1-testanother' self.assertEqual(set(self.pipeline_cfg['sources'][1]['resources']), set(per_task_resources[key].get({}))) def test_agent_manager_start(self): mgr = self.create_manager() mgr.extensions = self.mgr.extensions mgr.create_polling_task = mock.MagicMock() mgr.tg = mock.MagicMock() mgr.start() self.assertTrue(mgr.tg.add_timer.called) def test_manager_exception_persistency(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 60, 'meters': ['testanother'], 'sinks': ['test_sink'] }) self.setup_polling() def _verify_discovery_params(self, expected): self.assertEqual(expected, self.Discovery.params) self.assertEqual(expected, self.DiscoveryAnother.params) self.assertEqual(expected, self.DiscoveryException.params) def _do_test_per_pollster_discovery(self, discovered_resources, static_resources): self.Pollster.discovery = 'testdiscovery' self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] if static_resources: # just so we can test that static + pre_pipeline amalgamated # override per_pollster self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) if static_resources: self.assertEqual(set(static_resources + self.DiscoveryAnother.resources), set(self.Pollster.resources)) else: 
self.assertEqual(set(self.Discovery.resources), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pollster_discovery(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], []) def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): # ensure static+per_source_discovery overrides per_pollster_discovery self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pollster_discovery_duplicated(self): self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], []) def test_per_pollster_discovery_overridden_by_duplicated_static(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'dup', 'dup']) def test_per_pollster_discovery_caching(self): # ensure single discovery associated with multiple pollsters # only called once per polling cycle discovered_resources = ['discovered_1', 'discovered_2'] self.Pollster.discovery = 'testdiscovery' self.PollsterAnother.discovery = 'testdiscovery' self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.pipeline_cfg['sources'][0]['meters'].append('testanother') self.pipeline_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Discovery.params)) self.assertEqual(discovered_resources, self.Pollster.resources) self.assertEqual(discovered_resources, self.PollsterAnother.resources) def _do_test_per_pipeline_discovery(self, discovered_resources, static_resources): self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) discovery = self.Discovery.resources + self.DiscoveryAnother.resources # compare resource lists modulo ordering self.assertEqual(set(static_resources + discovery), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pipeline_discovery_discovered_only(self): self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], []) def test_per_pipeline_discovery_static_only(self): self._do_test_per_pipeline_discovery([], ['static_1', 'static_2']) def test_per_pipeline_discovery_discovered_augmented_by_static(self): self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pipeline_discovery_discovered_duplicated_static(self): self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], ['dup', 'static_1', 'dup']) def test_multiple_pipelines_different_static_resources(self): # assert that the individual lists of static and discovered resources # for each pipeline with a common interval are passed to individual # pollsters matching each pipeline self.pipeline_cfg['sources'][0]['resources'] = ['test://'] self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] 
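# Add a second source at the same 60s interval with its own static resource # and its own discoverer; the two sources should collapse into a single # polling task while keeping distinct per-source resource lists (asserted # below).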
self.pipeline_cfg['sources'].append({ 'name': 'another_pipeline', 'interval': 60, 'meters': ['test'], 'resources': ['another://'], 'discovery': ['testdiscoveryanother'], 'sinks': ['test_sink_new'] }) self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual([None], self.Discovery.params) self.assertEqual([None], self.DiscoveryAnother.params) self.assertEqual(2, len(self.Pollster.samples)) samples = self.Pollster.samples test_resources = ['test://', 'discovered_1', 'discovered_2'] another_resources = ['another://', 'discovered_3', 'discovered_4'] if samples[0][1] == test_resources: self.assertEqual(another_resources, samples[1][1]) elif samples[0][1] == another_resources: self.assertEqual(test_resources, samples[1][1]) else: self.fail('unexpected sample resources %s' % samples) def test_multiple_sources_different_discoverers(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery'], 'sinks': ['test_sink_1']}, {'name': 'test_source_2', 'interval': 60, 'meters': ['testanother'], 'discovery': ['testdiscoveryanother'], 'sinks': ['test_sink_2']}] sinks = [{'name': 'test_sink_1', 'transformers': [], 'publishers': ['test://']}, {'name': 'test_sink_2', 'transformers': [], 'publishers': ['test://']}] self.pipeline_cfg = {'sources': sources, 'sinks': sinks} self.mgr.discovery_manager = self.create_discovery_manager() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) self.assertEqual(1, len(self.PollsterAnother.samples)) self.assertEqual(['discovered_3', 'discovered_4'], self.PollsterAnother.resources) def test_multiple_sinks_same_discoverer(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery'], 'sinks': ['test_sink_1', 'test_sink_2']}] sinks = [{'name': 'test_sink_1', 'transformers': [], 'publishers': ['test://']}, {'name': 'test_sink_2', 'transformers': [], 'publishers': ['test://']}] self.pipeline_cfg = {'sources': sources, 'sinks': sinks} self.mgr.discovery_manager = self.create_discovery_manager() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) def test_discovery_partitioning(self): self.mgr.discovery_manager = self.create_discovery_manager() p_coord = self.mgr.partition_coordinator self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() 
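# A single polling cycle should offer each discoverer's resource set to the # partition coordinator exactly once; the expected extract_my_subset calls # are rebuilt below from the discovery manager itself.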
self.mgr.interval_task(polling_tasks.get(60)) expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), d.obj.resources) for d in self.mgr.discovery_manager if hasattr(d.obj, 'resources')] self.assertEqual(len(expected), len(p_coord.extract_my_subset.call_args_list)) for c in expected: self.assertIn(c, p_coord.extract_my_subset.call_args_list) def test_static_resources_partitioning(self): p_coord = self.mgr.partition_coordinator static_resources = ['static_1', 'static_2'] static_resources2 = ['static_3', 'static_4'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline2', 'interval': 60, 'meters': ['test', 'test2'], 'resources': static_resources2, 'sinks': ['test_sink'] }) # have one pipeline without static resources defined self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline3', 'interval': 60, 'meters': ['test', 'test2'], 'resources': [], 'sinks': ['test_sink'] }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) # Only two groups need to be created, one for each pipeline, # even though counter test is used twice expected = [mock.call(self.mgr.construct_group_id( utils.hash_of_set(resources)), resources) for resources in [static_resources, static_resources2]] self.assertEqual(len(expected), len(p_coord.extract_my_subset.call_args_list)) for c in expected: self.assertIn(c, p_coord.extract_my_subset.call_args_list) @mock.patch('ceilometer.agent.manager.LOG') def test_polling_and_notify_with_resources(self, LOG): self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.info.assert_called_with( 'Polling pollster %(poll)s in the context of %(src)s', {'poll': 'test', 'src': 'test_pipeline'}) @mock.patch('ceilometer.agent.manager.LOG') def test_skip_polling_and_notify_with_no_resources(self, LOG): self.pipeline_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches['test_pipeline'])[0] polling_task.poll_and_notify() LOG.info.assert_called_with( 'Skip pollster %(name)s, no %(p_context)sresources found this ' 'cycle', {'name': pollster.name, 'p_context': ''}) @mock.patch('ceilometer.agent.manager.LOG') def test_skip_polling_polled_resources(self, LOG): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 60, 'meters': ['test'], 'resources': ['test://'], 'sinks': ['test_sink'] }) self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.info.assert_called_with( 'Skip pollster %(name)s, no %(p_context)sresources found this ' 'cycle', {'name': 'test', 'p_context': 'new '}) ceilometer-6.1.5/ceilometer/tests/unit/agent/test_discovery.py0000664000567000056710000001011613072744706026014 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
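# The cases below drive the discovery plugins directly: EndpointDiscovery is # expected to turn its 'param' into a service_type lookup against the # keystone service catalog (catalog.get_urls), LocalNodeDiscovery simply # reports the local node, and NodesDiscoveryTripleO maps nova instances to # snmp:// resource URLs.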
"""Tests for ceilometer/central/manager.py """ import mock from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.agent.discovery import endpoint from ceilometer.agent.discovery import localnode from ceilometer.hardware import discovery as hardware class TestEndpointDiscovery(base.BaseTestCase): def setUp(self): super(TestEndpointDiscovery, self).setUp() self.discovery = endpoint.EndpointDiscovery() self.manager = mock.MagicMock() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('interface', 'test-endpoint-type', group='service_credentials') self.CONF.set_override('region_name', 'test-region-name', group='service_credentials') self.catalog = (self.manager.keystone.session.auth.get_access. return_value.service_catalog) def test_keystone_called(self): self.discovery.discover(self.manager, param='test-service-type') expected = [mock.call(service_type='test-service-type', interface='test-endpoint-type', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls.call_args_list) def test_keystone_called_no_service_type(self): self.discovery.discover(self.manager) expected = [mock.call(service_type=None, interface='test-endpoint-type', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls .call_args_list) def test_keystone_called_no_endpoints(self): self.catalog.get_urls.return_value = [] self.assertEqual([], self.discovery.discover(self.manager)) class TestLocalnodeDiscovery(base.BaseTestCase): def setUp(self): super(TestLocalnodeDiscovery, self).setUp() self.discovery = localnode.LocalNodeDiscovery() self.manager = mock.MagicMock() def test_lockalnode_discovery(self): self.assertEqual(['local_host'], self.discovery.discover(self.manager)) class TestHardwareDiscovery(base.BaseTestCase): class MockInstance(object): addresses = {'ctlplane': [ {'addr': '0.0.0.0', 'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'} ]} id = 'resource_id' image = {'id': 'image_id'} flavor = {'id': 'flavor_id'} expected = { 'resource_id': 'resource_id', 'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0', 'mac_addr': '01-23-45-67-89-ab', 'image_id': 'image_id', 'flavor_id': 'flavor_id', } def setUp(self): super(TestHardwareDiscovery, self).setUp() self.discovery = hardware.NodesDiscoveryTripleO() self.discovery.nova_cli = mock.MagicMock() self.manager = mock.MagicMock() def test_hardware_discovery(self): self.discovery.nova_cli.instance_get_all.return_value = [ self.MockInstance()] resources = self.discovery.discover(self.manager) self.assertEqual(1, len(resources)) self.assertEqual(self.expected, resources[0]) def test_hardware_discovery_without_flavor(self): instance = self.MockInstance() instance.flavor = {} self.discovery.nova_cli.instance_get_all.return_value = [instance] resources = self.discovery.discover(self.manager) self.assertEqual(0, len(resources)) ceilometer-6.1.5/ceilometer/tests/unit/agent/test_plugin.py0000664000567000056710000000432313072744706025306 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.agent import plugin_base class NotificationBaseTestCase(base.BaseTestCase): def setUp(self): super(NotificationBaseTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf class FakePlugin(plugin_base.NotificationBase): event_types = ['compute.*'] def process_notification(self, message): pass def get_targets(self, conf): pass def test_plugin_info(self): plugin = self.FakePlugin(mock.Mock()) plugin.to_samples_and_publish = mock.Mock() message = { 'ctxt': {'user_id': 'fake_user_id', 'project_id': 'fake_project_id'}, 'publisher_id': 'fake.publisher_id', 'event_type': 'fake.event', 'payload': {'foo': 'bar'}, 'metadata': {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8', 'timestamp': '2015-06-1909:19:35.786893'} } plugin.info([message]) notification = { 'priority': 'info', 'event_type': 'fake.event', 'timestamp': '2015-06-1909:19:35.786893', '_context_user_id': 'fake_user_id', '_context_project_id': 'fake_project_id', 'publisher_id': 'fake.publisher_id', 'payload': {'foo': 'bar'}, 'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8' } plugin.to_samples_and_publish.assert_called_with(mock.ANY, notification) ceilometer-6.1.5/ceilometer/tests/unit/agent/__init__.py0000664000567000056710000000000013072744703024471 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/event/0000775000567000056710000000000013072745164022417 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/event/test_endpoint.py0000664000567000056710000001712213072744706025654 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
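# These tests feed pre-built notification dicts (ctxt / publisher_id / # event_type / payload / metadata) into EventsNotificationEndpoint and check # the oslo.messaging NotificationResult it returns: REQUEUE when the only # publisher fails with ack_on_event_error disabled, HANDLED when a # multi-publisher pipeline is configured to continue past the error.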
"""Tests for Ceilometer notify daemon.""" import mock from oslo_config import cfg from oslo_config import fixture as fixture_config import oslo_messaging from oslo_utils import fileutils from oslotest import mockpatch import six import yaml from ceilometer.event import endpoint as event_endpoint from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } cfg.CONF.import_opt('store_events', 'ceilometer.notification', group='notification') class TestEventEndpoint(tests_base.BaseTestCase): def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test.TestPublisher, 'except://': test.TestPublisher} return fake_drivers[url](url) def _setup_pipeline(self, publishers): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': ['test.test'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': publishers }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") self.CONF.set_override('event_pipeline_cfg_file', ev_pipeline_cfg_file) ev_pipeline_mgr = pipeline.setup_event_pipeline() return ev_pipeline_mgr def _setup_endpoint(self, publishers): ev_pipeline_mgr = self._setup_pipeline(publishers) self.endpoint = event_endpoint.EventsNotificationEndpoint( ev_pipeline_mgr) self.endpoint.event_converter = mock.MagicMock() self.endpoint.event_converter.to_event.return_value = mock.MagicMock( event_type='test.test') def setUp(self): super(TestEventEndpoint, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([]) self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override("store_events", True, group="notification") self.setup_messaging(self.CONF) self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher', side_effect=self.get_publisher)) self.fake_publisher = mock.Mock() self.useFixture(mockpatch.Patch( 
'ceilometer.publisher.test.TestPublisher', return_value=self.fake_publisher)) def test_message_to_event(self): self._setup_endpoint(['test://']) self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) def test_bad_event_non_ack_and_requeue(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) def test_message_to_event_bad_event(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.LOG") as mock_logger: ret = self.endpoint.process_notification('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) exception_mock = mock_logger.exception self.assertIn('Exit after error from publisher', exception_mock.call_args_list[0][0][0]) def test_message_to_event_bad_event_multi_publish(self): self._setup_endpoint(['test://', 'except://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.LOG") as mock_logger: ret = self.endpoint.process_notification('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) exception_mock = mock_logger.exception self.assertIn('Continue after error from publisher', exception_mock.call_args_list[0][0][0]) ceilometer-6.1.5/ceilometer/tests/unit/event/test_trait_plugins.py0000664000567000056710000001035213072744706026716 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
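# A quick reference for the SplitterTraitPlugin parameters exercised below: # with separator='-' and segment=1, 'test-foobar-baz' yields 'foobar'; adding # max_split=1 stops after the first split and yields 'foobar-baz'; when the # separator is absent the original value passes through unchanged, and an # out-of-range segment yields None.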
from oslotest import base from ceilometer.event import trait_plugins class TestSplitterPlugin(base.BaseTestCase): def setUp(self): super(TestSplitterPlugin, self).setUp() self.pclass = trait_plugins.SplitterTraitPlugin def test_split(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test', value) param = dict(separator='-', segment=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar', value) param = dict(separator='-', segment=1, max_split=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar-baz', value) def test_no_sep(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test.foobar.baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test.foobar.baz', value) def test_no_segment(self): param = dict(separator='-', segment=5) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertIs(None, value) def test_no_match(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [] value = plugin.trait_values(match_list) self.assertEqual([], value) class TestBitfieldPlugin(base.BaseTestCase): def setUp(self): super(TestBitfieldPlugin, self).setUp() self.pclass = trait_plugins.BitfieldTraitPlugin self.init = 0 self.params = dict(initial_bitfield=self.init, flags=[dict(path='payload.foo', bit=0, value=42), dict(path='payload.foo', bit=1, value=12), dict(path='payload.thud', bit=1, value=23), dict(path='thingy.boink', bit=4), dict(path='thingy.quux', bit=6, value="wokka"), dict(path='payload.bar', bit=10, value='test')]) def test_bitfield(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) def test_initial(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] self.params['initial_bitfield'] = 0x2000 plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x2412, value[0]) def test_no_match(self): match_list = [] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(self.init, value[0]) def test_multi(self): match_list = [('payload.foo', 12), ('payload.thud', 23), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) ceilometer-6.1.5/ceilometer/tests/unit/event/test_converter.py0000664000567000056710000010015313072744706026040 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import jsonpath_rw_ext import mock from oslo_config import fixture as fixture_config import six from ceilometer import declarative from ceilometer.event import converter from ceilometer.event.storage import models from ceilometer import service as ceilometer_service from ceilometer.tests import base class ConverterBase(base.BaseTestCase): @staticmethod def _create_test_notification(event_type, message_id, **kw): return dict(event_type=event_type, message_id=message_id, priority="INFO", publisher_id="compute.host-1-2-3", timestamp="2013-08-08 21:06:37.803826", payload=kw, ) def assertIsValidEvent(self, event, notification): self.assertIsNot( None, event, "Notification dropped unexpectedly:" " %s" % str(notification)) self.assertIsInstance(event, models.Event) def assertIsNotValidEvent(self, event, notification): self.assertIs( None, event, "Notification NOT dropped when expected to be dropped:" " %s" % str(notification)) def assertHasTrait(self, event, name, value=None, dtype=None): traits = [trait for trait in event.traits if trait.name == name] self.assertTrue( len(traits) > 0, "Trait %s not found in event %s" % (name, event)) trait = traits[0] if value is not None: self.assertEqual(value, trait.value) if dtype is not None: self.assertEqual(dtype, trait.dtype) if dtype == models.Trait.INT_TYPE: self.assertIsInstance(trait.value, int) elif dtype == models.Trait.FLOAT_TYPE: self.assertIsInstance(trait.value, float) elif dtype == models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) elif dtype == models.Trait.TEXT_TYPE: self.assertIsInstance(trait.value, six.string_types) def assertDoesNotHaveTrait(self, event, name): traits = [trait for trait in event.traits if trait.name == name] self.assertEqual( len(traits), 0, "Extra Trait %s found in event %s" % (name, event)) def assertHasDefaultTraits(self, event): text = models.Trait.TEXT_TYPE self.assertHasTrait(event, 'service', dtype=text) def _cmp_tree(self, this, other): if hasattr(this, 'right') and hasattr(other, 'right'): return (self._cmp_tree(this.right, other.right) and self._cmp_tree(this.left, other.left)) if not hasattr(this, 'right') and not hasattr(other, 'right'): return this == other return False def assertPathsEqual(self, path1, path2): self.assertTrue(self._cmp_tree(path1, path2), 'JSONPaths not equivalent %s %s' % (path1, path2)) class TestTraitDefinition(ConverterBase): def setUp(self): super(TestTraitDefinition, self).setUp() self.n1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_uuid="uuid-for-instance-0001", instance_id="id-for-instance-0001", instance_uuid2=None, instance_id2=None, host='host-1-2-3', bogus_date='', image_meta=dict( disk_gb='20', thing='whatzit'), foobar=50) self.ext1 = mock.MagicMock(name='mock_test_plugin') self.test_plugin_class = self.ext1.plugin self.test_plugin = self.test_plugin_class() self.test_plugin.trait_values.return_value = ['foobar'] self.ext1.reset_mock() self.ext2 = mock.MagicMock(name='mock_nothing_plugin') self.nothing_plugin_class = self.ext2.plugin self.nothing_plugin = self.nothing_plugin_class() self.nothing_plugin.trait_values.return_value = [None] self.ext2.reset_mock() self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) def test_to_trait_with_plugin(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test')) tdef = 
converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_null_match_with_plugin(self): cfg = dict(type='text', fields=['payload.nothere', 'payload.bogus'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([]) def test_to_trait_with_plugin_null(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='nothing')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) self.nothing_plugin_class.assert_called_once_with() self.nothing_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_with_plugin_with_parameters(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test', parameters=dict(a=1, b='foo'))) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with(a=1, b='foo') self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait(self): cfg = dict(type='text', fields='payload.instance_id') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='int', fields='payload.image_meta.disk_gb') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.INT_TYPE, t.dtype) self.assertEqual(20, t.value) def test_to_trait_multiple(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='text', fields=['payload.instance_uuid', 'payload.instance_id']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_multiple_different_nesting(self): cfg = dict(type='int', fields=['payload.foobar', 'payload.image_meta.disk_gb']) tdef = 
converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual(50, t.value) cfg = dict(type='int', fields=['payload.image_meta.disk_gb', 'payload.foobar']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual(20, t.value) def test_to_trait_some_null_multiple(self): cfg = dict(type='text', fields=['payload.instance_id2', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_some_missing_multiple(self): cfg = dict(type='text', fields=['payload.not_here_boss', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_missing(self): cfg = dict(type='text', fields='payload.not_here_boss') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) def test_to_trait_null(self): cfg = dict(type='text', fields='payload.instance_id2') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) def test_to_trait_empty_nontext(self): cfg = dict(type='datetime', fields='payload.bogus_date') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) def test_to_trait_multiple_null_missing(self): cfg = dict(type='text', fields=['payload.not_here_boss', 'payload.instance_id2']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) def test_missing_fields_config(self): self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', dict(), self.fake_plugin_mgr) def test_string_fields_config(self): cfg = dict(fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertPathsEqual(t.getter.__self__, jsonpath_rw_ext.parse('payload.test')) def test_list_fields_config(self): cfg = dict(fields=['payload.test', 'payload.other']) t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertPathsEqual( t.getter.__self__, jsonpath_rw_ext.parse('(payload.test)|(payload.other)')) def test_invalid_path_config(self): # test invalid jsonpath... cfg = dict(fields='payload.bogus(') self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', cfg, self.fake_plugin_mgr) def test_invalid_plugin_config(self): # test malformed plugin config (no 'name' key)... cfg = dict(fields='payload.test', plugin=dict(bogus="true")) self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'test_trait', cfg, self.fake_plugin_mgr) def test_unknown_plugin(self): # test plugin name not present in the plugin manager...
cfg = dict(fields='payload.test', plugin=dict(name='bogus')) self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'test_trait', cfg, self.fake_plugin_mgr) def test_type_config(self): cfg = dict(type='text', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) cfg = dict(type='int', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.INT_TYPE, t.trait_type) cfg = dict(type='float', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) cfg = dict(type='datetime', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) def test_invalid_type_config(self): # test invalid trait type... cfg = dict(type='bogus', fields='payload.test') self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', cfg, self.fake_plugin_mgr) class TestEventDefinition(ConverterBase): def setUp(self): super(TestEventDefinition, self).setUp() self.traits_cfg = { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, } self.test_notification1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "test.thing", "uuid-for-notif-0002", instance_id="uuid-for-instance-0002") self.test_notification3 = self._create_test_notification( "test.thing", "uuid-for-notif-0003", instance_id="uuid-for-instance-0003", host=None) self.fake_plugin_mgr = {} def test_to_event(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) e = edef.to_event(self.test_notification1) self.assertEqual('test.thing', e.event_type) self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), e.generated) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0001', dtype=dtype) def test_to_event_missing_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) e = edef.to_event(self.test_notification2) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0002', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_to_event_null_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) e = edef.to_event(self.test_notification3) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0003', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_bogus_cfg_no_traits(self): bogus = dict(event_type='test.foo') self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr) def test_bogus_cfg_no_type(self): bogus = dict(traits=self.traits_cfg) self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr) def test_included_type_string(self): 
cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertEqual(1, len(edef._included_types)) self.assertEqual('test.thing', edef._included_types[0]) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertFalse(edef.match_type('random.thing')) def test_included_type_list(self): cfg = dict(event_type=['test.thing', 'other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertEqual(2, len(edef._included_types)) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertTrue(edef.included_type('other.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertTrue(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.thing')) def test_excluded_type_string(self): cfg = dict(event_type='!test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertEqual(1, len(edef._included_types)) self.assertEqual('*', edef._included_types[0]) self.assertEqual('test.thing', edef._excluded_types[0]) self.assertEqual(1, len(edef._excluded_types)) self.assertEqual('test.thing', edef._excluded_types[0]) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.included_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertTrue(edef.match_type('random.thing')) def test_excluded_type_list(self): cfg = dict(event_type=['!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertTrue(edef.match_type('random.thing')) def test_mixed_type_list(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.whatzit')) self.assertTrue(edef.match_type('random.thing')) def test_catchall(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['*', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertFalse(edef.is_catchall) cfg = 
dict(event_type=['*'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertTrue(edef.is_catchall) cfg = dict(event_type=['*', 'foo'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) self.assertTrue(edef.is_catchall) @mock.patch('oslo_utils.timeutils.utcnow') def test_extract_when(self, mock_utcnow): now = datetime.datetime.utcnow() modified = now + datetime.timedelta(minutes=1) mock_utcnow.return_value = now body = {"timestamp": str(modified)} when = converter.EventDefinition._extract_when(body) self.assertTimestampEqual(modified, when) body = {"_context_timestamp": str(modified)} when = converter.EventDefinition._extract_when(body) self.assertTimestampEqual(modified, when) then = now + datetime.timedelta(hours=1) body = {"timestamp": str(modified), "_context_timestamp": str(then)} when = converter.EventDefinition._extract_when(body) self.assertTimestampEqual(modified, when) when = converter.EventDefinition._extract_when({}) self.assertTimestampEqual(now, when) def test_default_traits(self): cfg = dict(event_type='test.thing', traits={}) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), len(edef.traits)) def test_traits(self): cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertIn('host', traits) self.assertIn('instance_id', traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, len(edef.traits)) class TestNotificationConverter(ConverterBase): def setUp(self): super(TestNotificationConverter, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf ceilometer_service.prepare_service(argv=[], config_files=[]) self.valid_event_def1 = [{ 'event_type': 'compute.instance.create.*', 'traits': { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, }, }] self.test_notification1 = self._create_test_notification( "compute.instance.create.start", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "bogus.notification.from.mars", "uuid-for-notif-0002", weird='true', host='cydonia') self.fake_plugin_mgr = {} @mock.patch('oslo_utils.timeutils.utcnow') def test_converter_missing_keys(self, mock_utcnow): # test a malformed notification now = datetime.datetime.utcnow() mock_utcnow.return_value = now c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr, add_catchall=True) message = {'event_type': "foo", 'message_id': "abc", 'publisher_id': "1"} e = c.to_event(message) self.assertIsValidEvent(e, message) self.assertEqual(1, len(e.traits)) self.assertEqual("foo", e.event_type) self.assertEqual(now, e.generated) def test_converter_with_catchall(self): c = converter.NotificationEventsConverter( self.valid_event_def1, self.fake_plugin_mgr, add_catchall=True) self.assertEqual(2, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) 
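# The explicit definition matched this notification, so the two configured
# traits appear alongside the default 'service' trait (three traits total).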
self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event(self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) self.assertDoesNotHaveTrait(e, 'instance_id') self.assertDoesNotHaveTrait(e, 'host') def test_converter_without_catchall(self): c = converter.NotificationEventsConverter( self.valid_event_def1, self.fake_plugin_mgr, add_catchall=False) self.assertEqual(1, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event(self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) def test_converter_empty_cfg_with_catchall(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr, add_catchall=True) self.assertEqual(1, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) e = c.to_event(self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) def test_converter_empty_cfg_without_catchall(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr, add_catchall=False) self.assertEqual(0, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsNotValidEvent(e, self.test_notification1) e = c.to_event(self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) @staticmethod def _convert_message(convert, level): message = {'priority': level, 'event_type': "foo", 'message_id': "abc", 'publisher_id': "1"} return convert.to_event(message) def test_store_raw_all(self): self.CONF.event.store_raw = ['info', 'error'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_info_only(self): self.CONF.event.store_raw = ['info'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_error_only(self): self.CONF.event.store_raw = ['error'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_skip_all(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_info_only_no_case(self): self.CONF.event.store_raw = ['INFO'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_skip_all(self): self.CONF.event.store_raw = ['unknown'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_and_good(self): self.CONF.event.store_raw = ['info', 'unknown'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) 
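# The unrecognized 'unknown' level is simply ignored; the valid 'info' entry
# still enables raw storage for info-priority notifications on its own.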
self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_setup_events_default_config(self): self.CONF.set_override('definitions_cfg_file', '/not/existing/file', group='event') self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.setup_events(self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) self.assertEqual(1, len(c.definitions)) self.assertTrue(c.definitions[0].is_catchall) self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.setup_events(self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) self.assertEqual(0, len(c.definitions)) ceilometer-6.1.5/ceilometer/tests/unit/event/__init__.py0000664000567000056710000000000013072744703024514 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/test_middleware.py0000664000567000056710000001030413072744706025023 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as fixture_config from ceilometer import middleware from ceilometer.tests import base HTTP_REQUEST = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'http.request', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } HTTP_RESPONSE = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'http.response', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}, u'response': {'status': '200 OK'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } class TestNotifications(base.BaseTestCase): def setUp(self): super(TestNotifications, self).setUp() self.CONF = 
self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) def test_process_request_notification(self): sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( HTTP_REQUEST ))[0] self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_process_response_notification(self): sample = list(middleware.HTTPResponse( mock.Mock()).process_notification(HTTP_RESPONSE))[0] self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_targets(self): targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) self.assertEqual(4, len(targets)) ceilometer-6.1.5/ceilometer/tests/unit/telemetry/0000775000567000056710000000000013072745164023310 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/telemetry/__init__.py0000664000567000056710000000000013072744703025405 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/telemetry/test_notifications.py0000664000567000056710000000760213072744703027575 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
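# NOTE: illustrative sketch only, not part of the original test module. The
# TelemetryIpc handler exercised below fans one batched notification out into
# a sample per entry of payload['samples']. The helper name
# `_example_counter_names` is invented for illustration and unused by the
# tests themselves.
def _example_counter_names(notification):
    """Return the counter_name of each sample bundled in a notification."""
    return [s['counter_name']
            for s in notification.get('payload', {}).get('samples', [])]
# e.g. _example_counter_names(NOTIFICATION) == [u'instance100', u'instance100']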
from oslotest import base from ceilometer.telemetry import notifications NOTIFICATION = { u'_context_domain': None, u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'event_type': u'telemetry.api', 'timestamp': u'2015-06-1909: 19: 35.786893', u'_context_auth_token': None, u'_context_read_only': False, 'payload': {'samples': [{'counter_name': u'instance100', u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'instance', u'timestamp': u'2015-06-19T09: 19: 35.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'counter_unit': u'instance', u'counter_volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905', u'counter_type': u'gauge'}, {u'counter_name': u'instance100', u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'instance', u'timestamp': u'2015-06-19T09: 19: 35.785330', u'message_signature': u'fake_signature12', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'counter_unit': u'instance', u'counter_volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905', u'counter_type': u'gauge'}]}, u'_context_resource_uuid': None, u'_context_user_identity': u'fake_user_identity---', u'_context_show_deleted': False, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', 'priority': 'info', u'_context_is_admin': True, u'_context_project_domain': None, u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', u'_context_user_domain': None, 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } class TelemetryIpcTestCase(base.BaseTestCase): def test_process_notification(self): sample_creation = notifications.TelemetryIpc(None) samples = list(sample_creation.process_notification(NOTIFICATION)) self.assertEqual(2, len(samples)) payload = NOTIFICATION["payload"]['samples'] for index, sample in enumerate(samples): self.assertEqual(payload[index]["user_id"], sample.user_id) self.assertEqual(payload[index]["counter_name"], sample.name) self.assertEqual(payload[index]["resource_id"], sample.resource_id) self.assertEqual(payload[index]["timestamp"], sample.timestamp) self.assertEqual(payload[index]["resource_metadata"], sample.resource_metadata) self.assertEqual(payload[index]["counter_volume"], sample.volume) self.assertEqual(payload[index]["source"], sample.source) self.assertEqual(payload[index]["counter_type"], sample.type) self.assertEqual(payload[index]["message_id"], sample.id) self.assertEqual(payload[index]["counter_unit"], sample.unit) ceilometer-6.1.5/ceilometer/tests/unit/test_novaclient.py0000664000567000056710000002263213072744706025057 0ustar jenkinsjenkins00000000000000# Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
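# NOTE: illustrative sketch only, not part of the original test module. The
# cache assertions below (test_with_flavor_and_image_cache and friends)
# depend on nova_client.Client memoizing flavor/image lookups by id within a
# call, so servers sharing an id cost one API hit. A hypothetical memoizer of
# the same shape -- `_example_cached_getter` is an invented name, not the real
# nova_client implementation:
def _example_cached_getter(getter):
    """Wrap a by-id lookup so repeated ids are served from a dict cache."""
    cache = {}

    def cached(obj_id):
        if obj_id not in cache:
            cache[obj_id] = getter(obj_id)
        return cache[obj_id]
    return cached
# Under such a wrapper, the two fake servers (flavor ids 1 and 2) account for
# exactly two flavors.get calls, matching the _flavors_count assertions.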
import mock import novaclient from oslo_config import fixture as fixture_config from oslotest import base from oslotest import mockpatch from ceilometer import nova_client class TestNovaClient(base.BaseTestCase): def setUp(self): super(TestNovaClient, self).setUp() self._flavors_count = 0 self._images_count = 0 self.nv = nova_client.Client() self.useFixture(mockpatch.PatchObject( self.nv.nova_client.flavors, 'get', side_effect=self.fake_flavors_get)) self.useFixture(mockpatch.PatchObject( self.nv.nova_client.images, 'get', side_effect=self.fake_images_get)) self.CONF = self.useFixture(fixture_config.Config()).conf def fake_flavors_get(self, *args, **kwargs): self._flavors_count += 1 a = mock.MagicMock() a.id = args[0] if a.id == 1: a.name = 'm1.tiny' elif a.id == 2: a.name = 'm1.large' else: raise novaclient.exceptions.NotFound('foobar') return a def fake_images_get(self, *args, **kwargs): self._images_count += 1 a = mock.MagicMock() a.id = args[0] image_details = { 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), 3: ('rhel-6-x64', None), 4: ('rhel-6-x64', dict()), 5: ('rhel-6-x64', dict(kernel_id=11)), 6: ('rhel-6-x64', dict(ramdisk_id=21)) } if a.id in image_details: a.name = image_details[a.id][0] a.metadata = image_details[a.id][1] else: raise novaclient.exceptions.NotFound('foobar') return a @staticmethod def fake_servers_list(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 1} b = mock.MagicMock() b.id = 43 b.flavor = {'id': 2} b.image = {'id': 2} return [a, b] def test_instance_get_all_by_host(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(2, len(instances)) self.assertEqual('m1.tiny', instances[0].flavor['name']) self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) self.assertEqual(11, instances[0].kernel_id) self.assertEqual(21, instances[0].ramdisk_id) def test_instance_get_all(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all() self.assertEqual(2, len(instances)) self.assertEqual(42, instances[0].id) self.assertEqual(1, instances[0].flavor['id']) self.assertEqual(1, instances[0].image['id']) @staticmethod def fake_servers_list_unknown_flavor(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = {'id': 1} return [a] def test_instance_get_all_by_host_unknown_flavor(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_flavor): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].flavor['name']) @staticmethod def fake_servers_list_unknown_image(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 666} return [a] @staticmethod def fake_servers_list_image_missing_metadata(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': args[0]} return [a] @staticmethod def fake_instance_image_missing(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = None return [a] def test_instance_get_all_by_host_unknown_image(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_image): instances = self.nv.instance_get_all_by_host('foobar') 
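# Image id 666 raises NotFound in fake_images_get, so the client is expected
# to substitute the synthetic name 'unknown-id-666' rather than fail.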
self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].image['name']) def test_with_flavor_and_image(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) instance = results[0] self.assertEqual(2, len(results)) self.assertEqual('ubuntu-12.04-x86', instance.image['name']) self.assertEqual('m1.tiny', instance.flavor['name']) self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_unknown_image(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_unknown_flavor(self): instances = self.fake_servers_list_unknown_flavor() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.flavor['name']) self.assertEqual(0, instance.flavor['vcpus']) self.assertEqual(0, instance.flavor['ram']) self.assertEqual(0, instance.flavor['disk']) self.assertNotEqual(instance.image['name'], 'unknown-id-666') self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_none_metadata(self): instances = self.fake_servers_list_image_missing_metadata(3) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_metadata(self): instances = self.fake_servers_list_image_missing_metadata(4) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_ramdisk(self): instances = self.fake_servers_list_image_missing_metadata(5) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual(11, instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_kernel(self): instances = self.fake_servers_list_image_missing_metadata(6) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_no_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) self.assertEqual(2, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) self.assertEqual(4, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_unknown_image_cache(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances * 2) self.assertEqual(2, len(results)) self.assertEqual(1, self._flavors_count) self.assertEqual(1, self._images_count) for instance in results: self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_missing_image_instance(self): instances = self.fake_instance_image_missing() results = self.nv._with_flavor_and_image(instances) instance = results[0] 
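# With instance.image set to None there is no image metadata to inspect, so
# kernel_id and ramdisk_id should remain None.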
self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.image) self.assertIsNone(instance.ramdisk_id) def test_with_nova_http_log_debug(self): self.CONF.set_override("nova_http_log_debug", True) self.nv = nova_client.Client() self.assertIsNotNone(self.nv.nova_client.client.logger) ceilometer-6.1.5/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py0000664000567000056710000003431313072744706027356 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutronclient.v2_0 import client from oslotest import base from ceilometer import neutron_client class TestNeutronClientLBaaSV2(base.BaseTestCase): def setUp(self): super(TestNeutronClientLBaaSV2, self).setUp() self.nc = neutron_client.Client() @staticmethod def fake_list_lbaas_pools(): return { 'pools': [{ 'lb_algorithm': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': 'simple pool', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'healthmonitor_id': None, 'listeners': [{ 'id': "35cb8516-1173-4035-8dae-0dae3453f37f" } ], 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858'} ], 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'name': 'pool1' }] } @staticmethod def fake_list_lbaas_members(): return { 'members': [{ 'weight': 1, 'admin_state_up': True, 'subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'address': '10.0.0.8', 'protocol_port': 80, 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858' }] } @staticmethod def fake_list_lbaas_healthmonitors(): return { 'healthmonitors': [{ 'admin_state_up': True, 'tenant_id': '6f3584d5754048a18e30685362b88411', 'delay': 1, 'expected_codes': '200,201,202', 'max_retries': 5, 'http_method': 'GET', 'timeout': 1, 'pools': [{ 'id': '74aa2010-a59f-4d35-a436-60a6da882819' }], 'url_path': '/index.html', 'type': 'HTTP', 'id': '0a9ac99d-0a09-4b18-8499-a0796850279a' }] } @staticmethod def fake_show_listener(): return { 'listener': { 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': '' } } @staticmethod def fake_retrieve_loadbalancer_status(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_retrieve_loadbalancer_status_complex(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 
'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }, { 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf969', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }, { 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfa7a', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4629', 'provisioning_status': 'ACTIVE' } }] }, { 'id': '35cb8516-1173-4035-8dae-0dae3453f48e', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce7g7', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfb8b', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b473a', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_list_lbaas_listeners(): return { 'listeners': [{ 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'listener_one' }]} @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_pools_v2(self, mock_status, mock_show, mock_list): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list.return_value = self.fake_list_lbaas_pools() pools = self.nc.list_pools_v2() self.assertEqual(1, len(pools)) for pool in pools: self.assertEqual('ONLINE', pool['status']) self.assertEqual('ROUND_ROBIN', pool['lb_method']) @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'list_lbaas_members') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_members_v2(self, mock_status, mock_show, mock_list_members, mock_list_pools): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list_pools.return_value = self.fake_list_lbaas_pools() mock_list_members.return_value = self.fake_list_lbaas_members() members = self.nc.list_members_v2() self.assertEqual(1, len(members)) for member in members: self.assertEqual('ONLINE', member['status']) self.assertEqual('4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', member['pool_id']) @mock.patch.object(client.Client, 'list_lbaas_healthmonitors') def test_list_health_monitors_v2(self, mock_list_healthmonitors): mock_list_healthmonitors.return_value = ( self.fake_list_lbaas_healthmonitors()) healthmonitors = self.nc.list_health_monitors_v2() self.assertEqual(1, len(healthmonitors)) for healthmonitor in 
healthmonitors: self.assertEqual(5, healthmonitor['max_retries']) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_member_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' listener_id = '35cb8516-1173-4035-8dae-0dae3453f37f' pool_id = '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5' parent_id = [listener_id, pool_id] result_status = self.nc._get_member_status(loadbalancer_id, parent_id) expected_keys = ['fcf23bde-8cf9-4616-883f-208cebcbf858', 'fcf23bde-8cf9-4616-883f-208cebcbf969'] expected_status = { 'fcf23bde-8cf9-4616-883f-208cebcbf858': 'ONLINE', 'fcf23bde-8cf9-4616-883f-208cebcbf969': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(expected_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_pool_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' parent_id = '35cb8516-1173-4035-8dae-0dae3453f37f' result_status = self.nc._get_pool_status(loadbalancer_id, parent_id) expected_keys = ['4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6'] expected_status = { '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5': 'ONLINE', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(expected_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_listener_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' result_status = self.nc._get_listener_status(loadbalancer_id) expected_keys = ['35cb8516-1173-4035-8dae-0dae3453f37f', '35cb8516-1173-4035-8dae-0dae3453f48e'] expected_status = { '35cb8516-1173-4035-8dae-0dae3453f37f': 'ONLINE', '35cb8516-1173-4035-8dae-0dae3453f48e': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(expected_status[key], result_status[key]) @mock.patch.object(client.Client, 'list_listeners') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_listener(self, mock_status, mock_list_listeners): mock_list_listeners.return_value = ( self.fake_list_lbaas_listeners()) mock_status.return_value = ( self.fake_retrieve_loadbalancer_status()) listeners = self.nc.list_listener() expected_key = '35cb8516-1173-4035-8dae-0dae3453f37f' expected_status = 'ONLINE' self.assertEqual(1, len(listeners)) self.assertEqual(expected_key, listeners[0]['id']) self.assertEqual(expected_status, listeners[0]['operating_status']) ceilometer-6.1.5/ceilometer/tests/unit/storage/0000775000567000056710000000000013072745164022742 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/storage/sqlalchemy/0000775000567000056710000000000013072745164025104 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/storage/sqlalchemy/test_models.py0000664000567000056710000000731413072744703030003 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from oslotest import base import sqlalchemy from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.types import NUMERIC from ceilometer.storage.sqlalchemy import models from ceilometer import utils class PreciseTimestampTest(base.BaseTestCase): @staticmethod def fake_dialect(name): def _type_descriptor_mock(desc): if type(desc) == DECIMAL: return NUMERIC(precision=desc.precision, scale=desc.scale) dialect = mock.MagicMock() dialect.name = name dialect.type_descriptor = _type_descriptor_mock return dialect def setUp(self): super(PreciseTimestampTest, self).setUp() self._mysql_dialect = self.fake_dialect('mysql') self._postgres_dialect = self.fake_dialect('postgres') self._type = models.PreciseTimestamp() self._date = datetime.datetime(2012, 7, 2, 10, 44) def test_load_dialect_impl_mysql(self): result = self._type.load_dialect_impl(self._mysql_dialect) self.assertEqual(NUMERIC, type(result)) self.assertEqual(20, result.precision) self.assertEqual(6, result.scale) self.assertTrue(result.asdecimal) def test_load_dialect_impl_postgres(self): result = self._type.load_dialect_impl(self._postgres_dialect) self.assertEqual(sqlalchemy.DateTime, type(result)) def test_process_bind_param_store_decimal_mysql(self): expected = utils.dt_to_decimal(self._date) result = self._type.process_bind_param(self._date, self._mysql_dialect) self.assertEqual(expected, result) def test_process_bind_param_store_datetime_postgres(self): result = self._type.process_bind_param(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_bind_param_store_none_mysql(self): result = self._type.process_bind_param(None, self._mysql_dialect) self.assertIsNone(result) def test_process_bind_param_store_none_postgres(self): result = self._type.process_bind_param(None, self._postgres_dialect) self.assertIsNone(result) def test_process_result_value_datetime_mysql(self): dec_value = utils.dt_to_decimal(self._date) result = self._type.process_result_value(dec_value, self._mysql_dialect) self.assertEqual(self._date, result) def test_process_result_value_datetime_postgres(self): result = self._type.process_result_value(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_result_value_none_mysql(self): result = self._type.process_result_value(None, self._mysql_dialect) self.assertIsNone(result) def test_process_result_value_none_postgres(self): result = self._type.process_result_value(None, self._postgres_dialect) self.assertIsNone(result) ceilometer-6.1.5/ceilometer/tests/unit/storage/sqlalchemy/__init__.py0000664000567000056710000000000013072744703027201 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/storage/test_get_connection.py0000664000567000056710000001167513072744706027364 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/ """ import unittest import mock from oslo_config import fixture as fixture_config from oslotest import base import retrying try: from ceilometer.event.storage import impl_hbase as impl_hbase_event except ImportError: impl_hbase_event = None from ceilometer import storage from ceilometer.storage import impl_log from ceilometer.storage import impl_sqlalchemy import six class EngineTest(base.BaseTestCase): def test_get_connection(self): engine = storage.get_connection('log://localhost', 'ceilometer.metering.storage') self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): try: storage.get_connection('no-such-engine://localhost', 'ceilometer.metering.storage') except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) class ConnectionRetryTest(base.BaseTestCase): def setUp(self): super(ConnectionRetryTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_retries(self): with mock.patch.object( retrying.Retrying, 'should_reject') as retry_reject: try: self.CONF.set_override("connection", "no-such-engine://", group="database") self.CONF.set_override("retry_interval", 0.00001, group="database") storage.get_connection_from_config(self.CONF) except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) self.assertEqual(10, retry_reject.call_count) class ConnectionConfigTest(base.BaseTestCase): def setUp(self): super(ConnectionConfigTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_only_default_url(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) def test_two_urls(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') def test_three_urls(self): self.CONF.set_override("connection", "log://", group="database") self.CONF.set_override("event_connection", "hbase://__test__", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'event') self.assertIsInstance(conn, impl_hbase_event.Connection) @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') def test_three_urls_no_default(self): self.CONF.set_override("connection", None, group="database") self.CONF.set_override("metering_connection", "log://", group="database") self.CONF.set_override("event_connection", "hbase://__test__", group="database") conn = storage.get_connection_from_config(self.CONF) 
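# With the default connection unset, the factory falls back to the
# purpose-specific metering_connection URL for the default lookup.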
self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'event') self.assertIsInstance(conn, impl_hbase_event.Connection) def test_sqlalchemy_driver(self): self.CONF.set_override("connection", "sqlite+pysqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_sqlalchemy.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_sqlalchemy.Connection) ceilometer-6.1.5/ceilometer/tests/unit/storage/test_base.py0000664000567000056710000000422713072744703025270 0ustar jenkinsjenkins00000000000000# Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import math from oslotest import base as testbase from ceilometer.storage import base class BaseTest(testbase.BaseTestCase): def test_iter_period(self): times = list(base.iter_period( datetime.datetime(2013, 1, 1, 12, 0), datetime.datetime(2013, 1, 1, 13, 0), 60)) self.assertEqual(60, len(times)) self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10), datetime.datetime(2013, 1, 1, 12, 11)), times[10]) self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21), datetime.datetime(2013, 1, 1, 12, 22)), times[21]) def test_iter_period_bis(self): times = list(base.iter_period( datetime.datetime(2013, 1, 2, 13, 0), datetime.datetime(2013, 1, 2, 14, 0), 55)) self.assertEqual(math.ceil(3600 / 55.0), len(times)) self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10), datetime.datetime(2013, 1, 2, 13, 10, 5)), times[10]) self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15), datetime.datetime(2013, 1, 2, 13, 20, 10)), times[21]) def test_handle_sort_key(self): sort_keys_meter = base._handle_sort_key('meter', 'foo') self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter) sort_keys_resource = base._handle_sort_key('resource', 'project_id') self.assertEqual(['project_id', 'user_id', 'timestamp'], sort_keys_resource) ceilometer-6.1.5/ceilometer/tests/unit/storage/test_models.py0000664000567000056710000000632113072744706025641 0ustar jenkinsjenkins00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
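# NOTE: illustrative sketch only, not part of the original test module. The
# as_dict() behaviour verified below recurses through nested models and lists
# of models. A hypothetical stand-alone equivalent over plain objects --
# `_ExampleNode` and `_example_to_dict` are invented names, not ceilometer
# APIs:
class _ExampleNode(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


def _example_to_dict(value):
    """Recursively turn _ExampleNode trees (and lists of them) into dicts."""
    if isinstance(value, _ExampleNode):
        return dict((k, _example_to_dict(v)) for k, v in vars(value).items())
    if isinstance(value, list):
        return [_example_to_dict(v) for v in value]
    return value
# e.g. _example_to_dict(_ExampleNode(arg1=1,
#                                    arg2=[_ExampleNode(arg1='a', arg2='b')]))
# yields {'arg1': 1, 'arg2': [{'arg1': 'a', 'arg2': 'b'}]}, the shape asserted
# in test_as_dict_recursive_list below.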
import datetime from oslotest import base as testbase import six from ceilometer.event.storage import models as event_models from ceilometer.storage import base from ceilometer.storage import models class FakeModel(base.Model): def __init__(self, arg1, arg2): base.Model.__init__(self, arg1=arg1, arg2=arg2) class ModelTest(testbase.BaseTestCase): def test_create_attributes(self): m = FakeModel(1, 2) self.assertEqual(1, m.arg1) self.assertEqual(2, m.arg2) def test_as_dict(self): m = FakeModel(1, 2) d = m.as_dict() self.assertEqual({'arg1': 1, 'arg2': 2}, d) def test_as_dict_recursive(self): m = FakeModel(1, FakeModel('a', 'b')) d = m.as_dict() self.assertEqual({'arg1': 1, 'arg2': {'arg1': 'a', 'arg2': 'b'}}, d) def test_as_dict_recursive_list(self): m = FakeModel(1, [FakeModel('a', 'b')]) d = m.as_dict() self.assertEqual({'arg1': 1, 'arg2': [{'arg1': 'a', 'arg2': 'b'}]}, d) def test_event_repr_no_traits(self): x = event_models.Event("1", "name", "now", None, {}) self.assertEqual("", repr(x)) def test_get_field_names_of_sample(self): sample_fields = ["source", "counter_name", "counter_type", "counter_unit", "counter_volume", "user_id", "project_id", "resource_id", "timestamp", "resource_metadata", "message_id", "message_signature", "recorded_at"] self.assertEqual(set(sample_fields), set(models.Sample.get_field_names())) class TestTraitModel(testbase.BaseTestCase): def test_convert_value(self): v = event_models.Trait.convert_value( event_models.Trait.INT_TYPE, '10') self.assertEqual(10, v) self.assertIsInstance(v, int) v = event_models.Trait.convert_value( event_models.Trait.FLOAT_TYPE, '10') self.assertEqual(10.0, v) self.assertIsInstance(v, float) v = event_models.Trait.convert_value( event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456') self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v) self.assertIsInstance(v, datetime.datetime) v = event_models.Trait.convert_value( event_models.Trait.TEXT_TYPE, 10) self.assertEqual("10", v) self.assertIsInstance(v, six.text_type) ceilometer-6.1.5/ceilometer/tests/unit/storage/__init__.py0000664000567000056710000000000013072744703025037 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/0000775000567000056710000000000013072745164023073 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/inspector/0000775000567000056710000000000013072745164025101 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/inspector/test_snmp.py0000664000567000056710000001764213072744706027502 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
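# ---------------------------------------------------------------------
# Illustrative sketch (hypothetical MiniModel, not the shipped
# ceilometer.storage.base.Model): test_as_dict_recursive and
# test_as_dict_recursive_list above pin down the serialisation
# contract that this sketch reproduces -- as_dict() recurses into
# nested models and into lists of models, producing plain dicts all
# the way down.
class MiniModel(object):
    def __init__(self, **kwds):
        self.fields = list(kwds)
        for name, value in kwds.items():
            setattr(self, name, value)

    def as_dict(self):
        d = {}
        for field in self.fields:
            value = getattr(self, field)
            if isinstance(value, MiniModel):
                value = value.as_dict()
            elif isinstance(value, list):
                value = [v.as_dict() if isinstance(v, MiniModel) else v
                         for v in value]
            d[field] = value
        return d


# Usage mirroring the tests: nested models flatten to nested dicts.
assert (MiniModel(arg1=1, arg2=MiniModel(arg1='a', arg2='b')).as_dict()
        == {'arg1': 1, 'arg2': {'arg1': 'a', 'arg2': 'b'}})
# ---------------------------------------------------------------------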
"""Tests for ceilometer/hardware/inspector/snmp/inspector.py """ from oslo_utils import netutils from oslotest import mockpatch from ceilometer.hardware.inspector import snmp from ceilometer.tests import base as test_base ins = snmp.SNMPInspector class FakeObjectName(object): def __init__(self, name): self.name = name def __str__(self): return str(self.name) def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): varBinds = [(FakeObjectName(oid), int(oid.split('.')[-1])) for oid in oids] return (None, None, 0, varBinds) def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, *oids, **kwargs): varBindTable = [ [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] for oid in oids ] return (None, None, 0, varBindTable) class TestSNMPInspector(test_base.BaseTestCase): mapping = { 'test_exact': { 'matching_type': snmp.EXACT, 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) }, 'post_op': '_fake_post_op', }, 'test_prefix': { 'matching_type': snmp.PREFIX, 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) }, 'post_op': None, }, } def setUp(self): super(TestSNMPInspector, self).setUp() self.inspector = snmp.SNMPInspector() self.host = netutils.urlsplit("snmp://localhost") self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) def test_snmp_error(self): def get_list(func, *args, **kwargs): return list(func(*args, **kwargs)) def faux_parse(ret, is_bulk): return (True, 'forced error') self.useFixture(mockpatch.PatchObject( snmp, 'parse_snmp_return', new=faux_parse)) self.assertRaises(snmp.SNMPException, get_list, self.inspector.inspect_generic, host=self.host, cache={}, extra_metadata={}, param=self.mapping['test_exact']) @staticmethod def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): metadata.update(post_op_meta=4) extra.update(project_id=2) return value def test_inspect_generic_exact(self): self.inspector._fake_post_op = self._fake_post_op cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_exact'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) self.assertEqual(1, len(ret)) self.assertEqual(1, ret[0][0]) self.assertEqual(8, ret[0][1]['meta']) self.assertEqual(4, ret[0][1]['post_op_meta']) self.assertEqual(2, ret[0][2]['project_id']) def test_inspect_generic_prefix(self): cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_prefix'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) self.assertEqual(2, len(ret)) self.assertIn(ret[0][0], (1, 2)) self.assertEqual(ret[0][0], ret[0][1]['meta']) def test_post_op_net(self): self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) cache = {} metadata = dict(name='lo', speed=0, mac='ba21e43302fe') extra = {} ret = self.inspector._post_op_net(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=".2") self.assertEqual(8, ret) self.assertIn('ip', metadata) self.assertIn("2", metadata['ip']) self.assertIn('resource_id', extra) 
self.assertEqual("localhost.lo", extra['resource_id']) def test_post_op_disk(self): cache = {} metadata = dict(device='/dev/sda1', path='/') extra = {} ret = self.inspector._post_op_disk(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=None) self.assertEqual(8, ret) self.assertIn('resource_id', extra) self.assertEqual("localhost./dev/sda1", extra['resource_id']) def test_prepare_params(self): param = {'post_op': '_post_op_disk', 'oid': '1.3.6.1.4.1.2021.9.1.6', 'type': 'int', 'matching_type': 'type_prefix', 'metadata': { 'device': {'oid': '1.3.6.1.4.1.2021.9.1.3', 'type': 'str'}, 'path': {'oid': '1.3.6.1.4.1.2021.9.1.2', 'type': "lambda x: str(x)"}}} processed = self.inspector.prepare_params(param) self.assertEqual('_post_op_disk', processed['post_op']) self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0]) self.assertEqual(int, processed['metric_oid'][1]) self.assertEqual(snmp.PREFIX, processed['matching_type']) self.assertEqual(2, len(processed['metadata'].keys())) self.assertEqual('1.3.6.1.4.1.2021.9.1.2', processed['metadata']['path'][0]) self.assertEqual("4", processed['metadata']['path'][1](4)) def test_pysnmp_ver43(self): # Test pysnmp version >=4.3 compatibility of ObjectIdentifier from distutils.version import StrictVersion import pysnmp has43 = StrictVersion(pysnmp.__version__) >= StrictVersion('4.3.0') oid = '1.3.6.4.1.2021.11.57.0' if has43: from pysnmp.entity import engine from pysnmp.smi import rfc1902 from pysnmp.smi import view snmp_engine = engine.SnmpEngine() mvc = view.MibViewController(snmp_engine.getMibBuilder()) name = rfc1902.ObjectIdentity(oid) name.resolveWithMib(mvc) else: from pysnmp.proto import rfc1902 name = rfc1902.ObjectName(oid) self.assertEqual(oid, str(name)) ceilometer-6.1.5/ceilometer/tests/unit/hardware/inspector/test_inspector.py0000664000567000056710000000217513072744706030526 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ceilometer.hardware import inspector from ceilometer.tests import base class TestHardwareInspector(base.BaseTestCase): def test_get_inspector(self): url = netutils.urlsplit("snmp://") driver = inspector.get_inspector(url) self.assertTrue(driver) def test_get_inspector_illegal(self): url = netutils.urlsplit("illegal://") self.assertRaises(RuntimeError, inspector.get_inspector, url) ceilometer-6.1.5/ceilometer/tests/unit/hardware/inspector/__init__.py0000664000567000056710000000000013072744703027176 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/pollsters/0000775000567000056710000000000013072745164025122 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/pollsters/test_generic.py0000664000567000056710000001626413072744706030161 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Intel Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six import yaml from oslo_config import fixture as fixture_config from oslo_utils import fileutils from oslotest import mockpatch from ceilometer import declarative from ceilometer.hardware.inspector import base as inspector_base from ceilometer.hardware.pollsters import generic from ceilometer import sample from ceilometer.tests import base as test_base class TestMeterDefinition(test_base.BaseTestCase): def test_config_definition(self): cfg = dict(name='test', type='gauge', unit='B', snmp_inspector={}) definition = generic.MeterDefinition(cfg) self.assertEqual('test', definition.name) self.assertEqual('gauge', definition.type) self.assertEqual('B', definition.unit) self.assertEqual({}, definition.snmp_inspector) def test_config_missing_field(self): cfg = dict(name='test', type='gauge') try: generic.MeterDefinition(cfg) except generic.MeterDefinitionException as e: self.assertEqual("Missing field unit", e.message) def test_config_invalid_field(self): cfg = dict(name='test', type='gauge', unit='B', invalid={}) definition = generic.MeterDefinition(cfg) self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar')) def test_config_invalid_type_field(self): cfg = dict(name='test', type='invalid', unit='B', snmp_inspector={}) try: generic.MeterDefinition(cfg) except generic.MeterDefinitionException as e: self.assertEqual("Unrecognized type value invalid", e.message) @mock.patch('ceilometer.hardware.pollsters.generic.LOG') def test_bad_metric_skip(self, LOG): cfg = {'metric': [dict(name='test1', type='gauge', unit='B', snmp_inspector={}), dict(name='test_bad', type='invalid', unit='B', snmp_inspector={}), dict(name='test2', type='gauge', unit='B', snmp_inspector={})]} data = generic.load_definition(cfg) self.assertEqual(2, len(data)) LOG.error.assert_called_with( "Error loading meter definition : " "Unrecognized type value invalid") class FakeInspector(inspector_base.Inspector): net_metadata = dict(name='test.teest', mac='001122334455', ip='10.0.0.2', speed=1000) DATA = { 'test': (0.99, {}, {}), 'test2': (90, net_metadata, {}), } def inspect_generic(self, host, cache, extra_metadata=None, param=None): yield self.DATA[host.hostname] class TestGenericPollsters(test_base.BaseTestCase): @staticmethod def faux_get_inspector(url, namespace=None): return FakeInspector() def setUp(self): super(TestGenericPollsters, self).setUp() self.conf = self.useFixture(fixture_config.Config()).conf self.resources = ["snmp://test", "snmp://test2"] self.useFixture(mockpatch.Patch( 'ceilometer.hardware.inspector.get_inspector', self.faux_get_inspector)) self.conf(args=[]) self.pollster = generic.GenericHardwareDeclarativePollster() def __setup_meter_def_file(self, cfg): if six.PY3: cfg = cfg.encode('utf-8') meter_cfg_file = fileutils.write_to_tempfile(content=cfg, prefix="snmp", suffix="yaml") self.conf.set_override( 'meter_definitions_file', meter_cfg_file, group='hardware') cfg = declarative.load_definitions( {}, self.conf.hardware.meter_definitions_file) 
return cfg def _check_get_samples(self, name, definition, expected_value, expected_type, expected_unit=None): self.pollster._update_meter_definition(definition) cache = {} samples = list(self.pollster.get_samples(None, cache, self.resources)) self.assertTrue(samples) self.assertIn(self.pollster.CACHE_KEY, cache) for resource in self.resources: self.assertIn(resource, cache[self.pollster.CACHE_KEY]) self.assertEqual(set([name]), set([s.name for s in samples])) match = [s for s in samples if s.name == name] self.assertEqual(expected_value, match[0].volume) self.assertEqual(expected_type, match[0].type) if expected_unit: self.assertEqual(expected_unit, match[0].unit) def test_get_samples(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_def = generic.MeterDefinition(dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param)) self._check_get_samples('hardware.test1', meter_def, 0.99, sample.TYPE_GAUGE, expected_unit='process') def test_get_pollsters_extensions(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_cfg = yaml.dump( {'metric': [dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param), dict(type='gauge', name='hardware.test2.abc', unit='process', snmp_inspector=param)]}) self.__setup_meter_def_file(meter_cfg) pollster = generic.GenericHardwareDeclarativePollster # Clear cached mapping pollster.mapping = None exts = pollster.get_pollsters_extensions() self.assertEqual(2, len(exts)) self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc']) self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc']) ceilometer-6.1.5/ceilometer/tests/unit/hardware/pollsters/test_util.py0000664000567000056710000000465313072744706027521 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
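# ---------------------------------------------------------------------
# Illustrative sketch (assumptions flagged inline): the generic
# hardware pollster tests above build their meter definition file
# programmatically with yaml.dump. This is roughly what such a file
# looks like on disk; the field names are taken from the tests
# themselves, and the file location is whatever the
# [hardware]/meter_definitions_file option (exercised above) points
# at.
import yaml

SNMP_METER_YAML = yaml.dump({
    'metric': [{
        'name': 'hardware.test1',
        'type': 'gauge',
        'unit': 'process',
        # snmp_inspector tells GenericHardwareDeclarativePollster which
        # OID to read and how to coerce the value it returns.
        'snmp_inspector': {
            'matching_type': 'type_exact',
            'oid': '1.3.6.1.4.1.2021.10.1.3.1',
            'type': 'lambda x: float(str(x))',
        },
    }],
})
# ---------------------------------------------------------------------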
from oslo_utils import netutils from ceilometer.hardware.pollsters import util from ceilometer import sample from ceilometer.tests import base as test_base class TestPollsterUtils(test_base.BaseTestCase): def setUp(self): super(TestPollsterUtils, self).setUp() self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") def test_make_sample(self): s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, res_metadata={ 'metakey': 'metaval', }) self.assertEqual('127.0.0.1', s.resource_id) self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) self.assertIn('metakey', s.resource_metadata.keys()) def test_make_sample_extra(self): extra = { 'project_id': 'project', 'resource_id': 'resource' } s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, extra=extra) self.assertIsNone(s.user_id) self.assertEqual('project', s.project_id) self.assertEqual('resource', s.resource_id) self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', 'project_id': 'project', 'resource_id': 'resource'}, s.resource_metadata) ceilometer-6.1.5/ceilometer/tests/unit/hardware/pollsters/__init__.py0000664000567000056710000000000013072744703027217 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/hardware/__init__.py0000664000567000056710000000000013072744703025170 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/0000775000567000056710000000000013072745164022767 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/test_floating_ip.py0000664000567000056710000001034413072744706026676 0ustar jenkinsjenkins00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network import floatingip from ceilometer.network.services import discovery class _BaseTestFloatingIPPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestFloatingIPPollster, self).setUp() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() class TestFloatingIPPollster(_BaseTestFloatingIPPollster): def setUp(self): super(TestFloatingIPPollster, self).setUp() self.pollster = floatingip.FloatingIPPollster() fake_fip = self.fake_get_fip_service() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'fip_get_all', return_value=fake_fip)) @staticmethod def fake_get_fip_service(): return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'ACTIVE', 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.6', 'floating_ip_address': '65.79.162.11', 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'DOWN', 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.7', 'floating_ip_address': '65.79.162.12', 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', 'status': 'error', 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.8', 'floating_ip_address': '65.79.162.13', 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}] def test_fip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fip_service()[0][field], samples[0].resource_metadata[field]) def test_fip_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(1, samples[0].volume) def test_get_fip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(set(['ip.floating']), set([s.name for s in samples])) def test_fip_discovery(self): discovered_fips = discovery.FloatingIPDiscovery().discover( self.manager) self.assertEqual(3, len(discovered_fips)) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/0000775000567000056710000000000013072745164025161 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_port.py0000664000567000056710000000715113072744703027560 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
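# ---------------------------------------------------------------------
# Reference sketch (values are placeholders copied from the fixtures
# above, not live data): each Neutron floating-IP record of this shape
# becomes one 'ip.floating' sample; per the assertions above, the
# sample's resource_metadata echoes the pollster's FIELDS and the
# first (ACTIVE) record yields a volume of 1.
MINIMAL_FIP_RECORD = {
    'id': '18ca27bf-72bc-40c8-9c13-414d564ea367',
    'status': 'ACTIVE',
    'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57',
    'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2',
    'floating_network_id': 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0',
    'fixed_ip_address': '10.0.0.6',
    'floating_ip_address': '65.79.162.11',
    'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209',
}
# ---------------------------------------------------------------------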
from ceilometer.network.statistics import port from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestPortPollsters(statistics._PollsterTestBase): def test_port_pollster(self): self._test_pollster( port.PortPollster, 'switch.port', sample.TYPE_GAUGE, 'port') def test_port_pollster_receive_packets(self): self._test_pollster( port.PortPollsterReceivePackets, 'switch.port.receive.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_packets(self): self._test_pollster( port.PortPollsterTransmitPackets, 'switch.port.transmit.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_bytes(self): self._test_pollster( port.PortPollsterReceiveBytes, 'switch.port.receive.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_transmit_bytes(self): self._test_pollster( port.PortPollsterTransmitBytes, 'switch.port.transmit.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_receive_drops(self): self._test_pollster( port.PortPollsterReceiveDrops, 'switch.port.receive.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_drops(self): self._test_pollster( port.PortPollsterTransmitDrops, 'switch.port.transmit.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_errors(self): self._test_pollster( port.PortPollsterReceiveErrors, 'switch.port.receive.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_errors(self): self._test_pollster( port.PortPollsterTransmitErrors, 'switch.port.transmit.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_frame_errors(self): self._test_pollster( port.PortPollsterReceiveFrameErrors, 'switch.port.receive.frame_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_overrun_errors(self): self._test_pollster( port.PortPollsterReceiveOverrunErrors, 'switch.port.receive.overrun_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_crc_errors(self): self._test_pollster( port.PortPollsterReceiveCRCErrors, 'switch.port.receive.crc_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_collision_count(self): self._test_pollster( port.PortPollsterCollisionCount, 'switch.port.collision.count', sample.TYPE_CUMULATIVE, 'packet') ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_statistics.py0000664000567000056710000001466213072744706030776 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
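# ---------------------------------------------------------------------
# Reference data (restated from the assertions above, for quick lookup
# only): the thirteen port pollster cases reduce to one
# meter-name -> (sample type, unit) mapping, where 'gauge' and
# 'cumulative' correspond to sample.TYPE_GAUGE and
# sample.TYPE_CUMULATIVE.
PORT_METER_EXPECTATIONS = {
    'switch.port': ('gauge', 'port'),
    'switch.port.receive.packets': ('cumulative', 'packet'),
    'switch.port.transmit.packets': ('cumulative', 'packet'),
    'switch.port.receive.bytes': ('cumulative', 'B'),
    'switch.port.transmit.bytes': ('cumulative', 'B'),
    'switch.port.receive.drops': ('cumulative', 'packet'),
    'switch.port.transmit.drops': ('cumulative', 'packet'),
    'switch.port.receive.errors': ('cumulative', 'packet'),
    'switch.port.transmit.errors': ('cumulative', 'packet'),
    'switch.port.receive.frame_error': ('cumulative', 'packet'),
    'switch.port.receive.overrun_error': ('cumulative', 'packet'),
    'switch.port.receive.crc_error': ('cumulative', 'packet'),
    'switch.port.collision.count': ('cumulative', 'packet'),
}
# ---------------------------------------------------------------------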
import datetime from oslo_utils import timeutils from oslotest import base from ceilometer.network import statistics from ceilometer.network.statistics import driver from ceilometer import sample class TestBase(base.BaseTestCase): @staticmethod def test_subclass_ok(): class OkSubclass(statistics._Base): meter_name = 'foo' meter_type = sample.TYPE_GAUGE meter_unit = 'B' OkSubclass() def test_subclass_ng(self): class NgSubclass1(statistics._Base): """meter_name is lost.""" meter_type = sample.TYPE_GAUGE meter_unit = 'B' class NgSubclass2(statistics._Base): """meter_type is lost.""" meter_name = 'foo' meter_unit = 'B' class NgSubclass3(statistics._Base): """meter_unit is lost.""" meter_name = 'foo' meter_type = sample.TYPE_GAUGE self.assertRaises(TypeError, NgSubclass1) self.assertRaises(TypeError, NgSubclass2) self.assertRaises(TypeError, NgSubclass3) class TestBaseGetSamples(base.BaseTestCase): def setUp(self): super(TestBaseGetSamples, self).setUp() class FakePollster(statistics._Base): meter_name = 'foo' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'bar' self.pollster = FakePollster() def tearDown(self): statistics._Base.drivers = {} super(TestBaseGetSamples, self).tearDown() @staticmethod def _setup_ext_mgr(**drivers): statistics._Base.drivers = drivers def _make_fake_driver(self, *return_values): class FakeDriver(driver.Driver): def __init__(self): self.index = 0 def get_sample_data(self, meter_name, parse_url, params, cache): if self.index >= len(return_values): yield None retval = return_values[self.index] self.index += 1 yield retval return FakeDriver @staticmethod def _make_timestamps(count): now = timeutils.utcnow() return [(now + datetime.timedelta(seconds=i)).isoformat() for i in range(count)] def _get_samples(self, *resources): return [v for v in self.pollster.get_samples(self, {}, resources)] def _assert_sample(self, s, volume, resource_id, resource_metadata, timestamp): self.assertEqual('foo', s.name) self.assertEqual(sample.TYPE_CUMULATIVE, s.type) self.assertEqual('bar', s.unit) self.assertEqual(volume, s.volume) self.assertIsNone(s.user_id) self.assertIsNone(s.project_id) self.assertEqual(resource_id, s.resource_id) self.assertEqual(timestamp, s.timestamp) self.assertEqual(resource_metadata, s.resource_metadata) def test_get_samples_one_driver_one_resource(self): times = self._make_timestamps(2) fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, times[0]), (2, 'b', None, times[1])) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo') self.assertEqual(1, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) def test_get_samples_one_driver_two_resource(self): times = self._make_timestamps(3) fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, times[0]), (2, 'b', None, times[1]), (3, 'c', None, times[2])) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo', 'http://bar') self.assertEqual(2, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) self._assert_sample(samples[1], 2, 'b', None, times[1]) def test_get_samples_two_driver_one_resource(self): times = self._make_timestamps(4) fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'}, times[0]), (2, 'b', None, times[1])) fake_driver2 = self._make_fake_driver((11, 'A', None, times[2]), (12, 'B', None, times[3])) self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2()) samples = self._get_samples('http://foo') self.assertEqual(1, len(samples)) self._assert_sample(samples[0],
1, 'a', {'spam': 'egg'}, times[0]) def test_get_samples_multi_samples(self): times = self._make_timestamps(2) fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'}, times[0]), (2, 'b', None, times[1])]) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo') self.assertEqual(2, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) self._assert_sample(samples[1], 2, 'b', None, times[1]) def test_get_samples_return_none(self): fake_driver = self._make_fake_driver(None) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo') self.assertEqual(0, len(samples)) def test_get_samples_return_no_generator(self): class NoneFakeDriver(driver.Driver): def get_sample_data(self, meter_name, parse_url, params, cache): return None self._setup_ext_mgr(http=NoneFakeDriver()) samples = self._get_samples('http://foo') self.assertFalse(samples) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opendaylight/0000775000567000056710000000000013072745164027650 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py0000664000567000056710000001407113072744706032543 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
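# ---------------------------------------------------------------------
# Illustrative sketch (hypothetical OneShotDriver, not shipped code):
# TestBaseGetSamples above fixes the contract between the statistics
# pollsters and their drivers -- get_sample_data() is a generator of
# (volume, resource_id, resource_metadata, timestamp) tuples, and it
# may yield None, or return None outright, to signal "no data".
from oslo_utils import timeutils

from ceilometer.network.statistics import driver


class OneShotDriver(driver.Driver):
    """Yield a single hard-coded sample for any meter requested."""

    def get_sample_data(self, meter_name, parse_url, params, cache):
        # volume, resource_id, resource_metadata, timestamp
        yield (1, 'fake-switch', {'spam': 'egg'},
               timeutils.utcnow().isoformat())
# ---------------------------------------------------------------------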
import mock from oslo_config import fixture as config_fixture from oslotest import base from requests import auth as req_auth import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer.network.statistics.opendaylight import client from ceilometer import service as ceilometer_service class TestClientHTTPBasicAuth(base.BaseTestCase): auth_way = 'basic' scheme = 'http' def setUp(self): super(TestClientHTTPBasicAuth, self).setUp() self.conf = self.useFixture(config_fixture.Config()) ceilometer_service.prepare_service(argv=[], config_files=[]) self.parsed_url = urlparse.urlparse( 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' 'container_name=egg&auth=%s&user=admin&password=admin_pass&' 'scheme=%s' % (self.auth_way, self.scheme)) self.params = urlparse.parse_qs(self.parsed_url.query) self.endpoint = urlparse.urlunparse( urlparse.ParseResult(self.scheme, self.parsed_url.netloc, self.parsed_url.path, None, None, None)) odl_params = {'auth': self.params.get('auth')[0], 'user': self.params.get('user')[0], 'password': self.params.get('password')[0]} self.client = client.Client(self.endpoint, odl_params) self.resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.resp).start() self.resp.raw.version = 1.1 self.resp.status_code = 200 self.resp.reason = 'OK' self.resp.headers = {} self.resp.content = 'dummy' def _test_request(self, method, url): data = method('default') call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] # check url real_url = url % {'container_name': 'default', 'scheme': self.scheme} self.assertEqual(real_url, call_args[0]) # check auth parameters auth = call_kwargs.get('auth') if self.auth_way == 'digest': self.assertIsInstance(auth, req_auth.HTTPDigestAuth) else: self.assertIsInstance(auth, req_auth.HTTPBasicAuth) self.assertEqual('admin', auth.username) self.assertEqual('admin_pass', auth.password) # check header self.assertEqual( {'Accept': 'application/json'}, call_kwargs['headers']) # check return value self.assertEqual(self.get().json(), data) def test_flow_statistics(self): self._test_request( self.client.statistics.get_flow_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/flow') def test_port_statistics(self): self._test_request( self.client.statistics.get_port_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/port') def test_table_statistics(self): self._test_request( self.client.statistics.get_table_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/table') def test_topology(self): self._test_request( self.client.topology.get_topology, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/topology/%(container_name)s') def test_user_links(self): self._test_request( self.client.topology.get_user_links, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/topology/%(container_name)s/userLinks') def test_switch(self): self._test_request( self.client.switch_manager.get_nodes, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/switchmanager/%(container_name)s/nodes') def test_active_hosts(self): self._test_request( self.client.host_tracker.get_active_hosts, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/hosttracker/%(container_name)s/hosts/active') def test_inactive_hosts(self): self._test_request( self.client.host_tracker.get_inactive_hosts, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/hosttracker/%(container_name)s/hosts/inactive') def 
test_http_error(self): self.resp.status_code = 404 self.resp.reason = 'Not Found' try: self.client.statistics.get_flow_statistics('default') self.fail('') except client.OpenDaylightRESTAPIFailed as e: self.assertEqual( _('OpenDaylitght API returned %(status)s %(reason)s') % {'status': self.resp.status_code, 'reason': self.resp.reason}, six.text_type(e)) def test_other_error(self): class _Exception(Exception): pass self.get = mock.patch('requests.get', side_effect=_Exception).start() self.assertRaises(_Exception, self.client.statistics.get_flow_statistics, 'default') class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): auth_way = 'digest' class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): scheme = 'https' class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): scheme = 'https' ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py0000664000567000056710000020136313072744706032562 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import base import six from six import moves from six.moves.urllib import parse as url_parse from ceilometer.network.statistics.opendaylight import driver @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractproperty def flow_data(self): pass @abc.abstractproperty def port_data(self): pass @abc.abstractproperty def table_data(self): pass @abc.abstractproperty def topology_data(self): pass @abc.abstractproperty def switch_data(self): pass @abc.abstractproperty def user_links_data(self): pass @abc.abstractproperty def active_hosts_data(self): pass @abc.abstractproperty def inactive_hosts_data(self): pass fake_odl_url = url_parse.ParseResult('opendaylight', 'localhost:8080', 'controller/nb/v2', None, None, None) fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=default&auth=basic') fake_params_multi_container = ( url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=first&container_name=second&' 'auth=basic')) def setUp(self): super(_Base, self).setUp() self.addCleanup(mock.patch.stopall) self.driver = driver.OpenDayLightDriver() self.get_flow_statistics = mock.patch( 'ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', return_value=self.flow_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_table_statistics', return_value=self.table_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_port_statistics', return_value=self.port_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_topology', return_value=self.topology_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_user_links', return_value=self.user_links_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'SwitchManagerAPIClient.get_nodes', return_value=self.switch_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'HostTrackerAPIClient.get_active_hosts', return_value=self.active_hosts_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'HostTrackerAPIClient.get_inactive_hosts', return_value=self.inactive_hosts_data).start() def _test_for_meter(self, meter_name, expected_data): sample_data = self.driver.get_sample_data(meter_name, self.fake_odl_url, self.fake_params, {}) for sample, expected in moves.zip(sample_data, expected_data): self.assertEqual(expected[0], sample[0]) # check volume self.assertEqual(expected[1], sample[1]) # check resource id self.assertEqual(expected[2], sample[2]) # check resource metadata self.assertIsNotNone(sample[3]) # timestamp class TestOpenDayLightDriverSpecial(_Base): flow_data = {"flowStatistics": []} port_data = {"portStatistics": []} table_data = {"tableStatistics": []} topology_data = {"edgeProperties": []} switch_data = {"nodeProperties": []} user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_not_implemented_meter(self): sample_data = self.driver.get_sample_data('egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) sample_data = self.driver.get_sample_data('switch.table.egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) def test_cache(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(1, self.get_flow_statistics.call_count) cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(2, self.get_flow_statistics.call_count) def test_multi_container(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertEqual(2, self.get_flow_statistics.call_count) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('first', odl_data) self.assertIn('second', odl_data) def test_http_error(self): mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', side_effect=Exception()).start() sample_data = self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, {}) self.assertEqual(0, len(sample_data)) mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'StatisticsAPIClient.get_flow_statistics', side_effect=[Exception(), self.flow_data]).start() cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('second', odl_data) class TestOpenDayLightDriverSimple(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, ] } ] } topology_data = {"edgeProperties": []} switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, ] } user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_timeStamp_connectedSince": "1377291227877" }), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] 
self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.frame_error', expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", 
"flow_idleTimeout": "0", "flow_priority": "1" }), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.bytes', expected_data) class TestOpenDayLightDriverComplex(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", 
"priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.2" } ] }, "actions": { "@type": "output", "port": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "89", "durationNanoseconds": "200000", "durationSeconds": "5648", "packetCount": "30", "tableId": "1" } ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12740", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "182", "transmitBytes": "12110", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "173" }, { "nodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12180", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "174", "transmitBytes": "12670", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "181" }, { "nodeConnector": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" } ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, { "activeCount": "20", "lookupCount": "10", "matchedCount": "5", "nodeTable": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } } ] } ] } topology_data = { "edgeProperties": [ { "edge": { "headNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "properties": { "bandwidth": { "value": 10000000000 }, "config": { "value": 1 }, "name": { "value": "s2-eth3" }, "state": { "value": 1 }, 
"timeStamp": { "name": "creation", "value": 1379527162648 } } }, { "edge": { "headNodeConnector": { "id": "5", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:04", "type": "OF" }, "type": "OF" } }, "properties": { "timeStamp": { "name": "creation", "value": 1379527162648 } } } ] } switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "buffers": { "value": "256" }, "capabilities": { "value": "199" }, "description": { "value": "None" }, "macAddress": { "value": "00:00:00:00:00:02" }, "tables": { "value": "-1" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, { "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "properties": { "actions": { "value": "1024" }, "buffers": { "value": "512" }, "capabilities": { "value": "1000" }, "description": { "value": "Foo Bar" }, "macAddress": { "value": "00:00:00:00:00:03" }, "tables": { "value": "10" }, "timeStamp": { "name": "connectedSince", "value": "1377291228000" } } } ] } user_links_data = { "userLinks": [ { "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", "name": "link1", "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", "status": "Success" } ] } active_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:00:01:01", "networkAddress": "1.1.1.1", "nodeConnectorId": "9", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:00:02:02", "networkAddress": "2.2.2.2", "nodeConnectorId": "1", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "true", "vlan": "0" } ] } inactive_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:01:01:01", "networkAddress": "1.1.1.3", "nodeConnectorId": "8", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:01:02:02", "networkAddress": "2.2.2.4", "nodeConnectorId": "0", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "false", "vlan": "1" } ] } def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_buffers": "256", "properties_capabilities": "199", "properties_description": "None", "properties_macAddress": "00:00:00:00:00:02", "properties_tables": "-1", "properties_timeStamp_connectedSince": "1377291227877" }), (1, "00:00:00:00:00:00:00:03", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "1024", "properties_buffers": "512", "properties_capabilities": "1000", "properties_description": "Foo Bar", "properties_macAddress": "00:00:00:00:00:03", "properties_tables": "10", "properties_timeStamp_connectedSince": "1377291228000" }), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3', 'user_link_node_id': '00:00:00:00:00:00:00:05', 'user_link_node_port': '5', 'user_link_status': 'Success', 'user_link_name': 'link1', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 
'default', 'port': '2', 'topology_node_id': '00:00:00:00:00:00:00:03', 'topology_node_port': '2', "topology_bandwidth": 10000000000, "topology_config": 1, "topology_name": "s2-eth3", "topology_state": 1, "topology_timeStamp_creation": 1379527162648 }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1', 'host_status': 'active', 'host_dataLayerAddress': '00:00:00:00:02:02', 'host_networkAddress': '2.2.2.2', 'host_staticHost': 'true', 'host_vlan': '0', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0', 'host_status': 'inactive', 'host_dataLayerAddress': '00:00:00:01:02:02', 'host_networkAddress': '2.2.2.4', 'host_staticHost': 'false', 'host_vlan': '1', }), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (182, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (174, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (173, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (181, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (12740, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (12180, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (12110, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (12670, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 
'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.frame_error', expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 
'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (20, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (10, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (5, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }), (1, 
"00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (5648, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (200000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 
'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (30, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (89, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.bytes', expected_data) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py0000664000567000056710000000000013072744703031745 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_flow.py0000664000567000056710000000342413072744703027542 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
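# ---------------------------------------------------------------------------
# Editor's note: the OpenDaylight driver tests above assert flattened
# resource_metadata keys such as "flow_match_matchField[0]_type" and
# "flow_actions_port_node_id".  The convention is simple: nested dict keys
# are joined with '_' and list elements are indexed with '[n]'.  The helper
# below is a minimal illustrative sketch of that convention only; it is an
# assumption added for clarity, not the actual ceilometer implementation,
# and special cases such as the timeStamp entries (whose 'name' field
# becomes part of the key, e.g. "properties_timeStamp_connectedSince")
# are not covered.
def _flatten_metadata_sketch(data, prefix=''):
    """Flatten nested dicts/lists into single-level '_'-joined keys."""
    flat = {}
    if isinstance(data, dict):
        for key, value in data.items():
            name = '%s_%s' % (prefix, key) if prefix else key
            flat.update(_flatten_metadata_sketch(value, name))
    elif isinstance(data, list):
        for index, value in enumerate(data):
            flat.update(_flatten_metadata_sketch(value,
                                                 '%s[%d]' % (prefix, index)))
    else:
        flat[prefix] = data
    return flat
# Example:
#   _flatten_metadata_sketch(
#       {'match': {'matchField': [{'type': 'DL_TYPE', 'value': '2048'}]}},
#       'flow')
#   => {'flow_match_matchField[0]_type': 'DL_TYPE',
#       'flow_match_matchField[0]_value': '2048'}
# ---------------------------------------------------------------------------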
from ceilometer.network.statistics import flow from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestFlowPollsters(statistics._PollsterTestBase): def test_flow_pollster(self): self._test_pollster( flow.FlowPollster, 'switch.flow', sample.TYPE_GAUGE, 'flow') def test_flow_pollster_duration_seconds(self): self._test_pollster( flow.FlowPollsterDurationSeconds, 'switch.flow.duration_seconds', sample.TYPE_GAUGE, 's') def test_flow_pollster_duration_nanoseconds(self): self._test_pollster( flow.FlowPollsterDurationNanoseconds, 'switch.flow.duration_nanoseconds', sample.TYPE_GAUGE, 'ns') def test_flow_pollster_packets(self): self._test_pollster( flow.FlowPollsterPackets, 'switch.flow.packets', sample.TYPE_CUMULATIVE, 'packet') def test_flow_pollster_bytes(self): self._test_pollster( flow.FlowPollsterBytes, 'switch.flow.bytes', sample.TYPE_CUMULATIVE, 'B') ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_switch.py0000664000567000056710000000170713072744703030076 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import switch from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestSwitchPollster(statistics._PollsterTestBase): def test_switch_pollster(self): self._test_pollster( switch.SWPollster, 'switch', sample.TYPE_GAUGE, 'switch') ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_driver.py0000664000567000056710000000207513072744706030072 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
from oslotest import base from ceilometer.network.statistics import driver class TestDriver(base.BaseTestCase): @staticmethod def test_driver_ok(): class OkDriver(driver.Driver): def get_sample_data(self, meter_name, resources, cache): pass OkDriver() def test_driver_ng(self): class NgDriver(driver.Driver): """A driver that does not implement get_sample_data.""" self.assertRaises(TypeError, NgDriver) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opencontrail/0000775000567000056710000000000013072745164027656 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py0000664000567000056710000000517313072744706032554 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as config_fixture from oslotest import base from ceilometer.network.statistics.opencontrail import client from ceilometer import service as ceilometer_service class TestOpencontrailClient(base.BaseTestCase): def setUp(self): super(TestOpencontrailClient, self).setUp() self.conf = self.useFixture(config_fixture.Config()) ceilometer_service.prepare_service(argv=[], config_files=[]) self.client = client.Client('http://127.0.0.1:8081', {'arg1': 'aaa'}) self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() self.get_resp.raw.version = 1.1 self.get_resp.status_code = 200 self.get_resp.reason = 'OK' self.get_resp.content = '' def test_vm_statistics(self): self.client.networks.get_vm_statistics('bbb') call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] expected_url = ('http://127.0.0.1:8081/analytics/' 'uves/virtual-machine/bbb') self.assertEqual(expected_url, call_args[0]) data = call_kwargs.get('data') expected_data = {'arg1': 'aaa'} self.assertEqual(expected_data, data) def test_vm_statistics_params(self): self.client.networks.get_vm_statistics('bbb', {'resource': 'fip_stats_list', 'virtual_network': 'ccc'}) call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] expected_url = ('http://127.0.0.1:8081/analytics/' 'uves/virtual-machine/bbb') self.assertEqual(expected_url, call_args[0]) data = call_kwargs.get('data') expected_data = {'arg1': 'aaa', 'resource': 'fip_stats_list', 'virtual_network': 'ccc'} self.assertEqual(expected_data, data) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py0000664000567000056710000002722513072744706032573 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from six.moves.urllib import parse as urlparse from ceilometer.network.statistics.opencontrail import driver class TestOpencontrailDriver(base.BaseTestCase): def setUp(self): super(TestOpencontrailDriver, self).setUp() self.nc_ports = mock.patch('ceilometer.neutron_client' '.Client.port_get_all', return_value=self.fake_ports()) self.nc_ports.start() self.driver = driver.OpencontrailDriver() self.parse_url = urlparse.ParseResult('opencontrail', '127.0.0.1:8143', '/', None, None, None) self.params = {'password': ['admin'], 'scheme': ['http'], 'username': ['admin'], 'verify_ssl': ['false'], 'resource': ['if_stats_list']} @staticmethod def fake_ports(): return [{'admin_state_up': True, 'device_owner': 'compute:None', 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] @staticmethod def fake_port_stats(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], "fip_stats_list": [{ "in_bytes": 33, "iface_name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442"), "out_bytes": 44, "out_pkts": 10, "virtual_network": "default-domain:openstack:public", "in_pkts": 11, "ip_address": "1.1.1.1" }] }}}]} @staticmethod def fake_port_stats_with_node(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [ [[{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], 'node1'], [[{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 4, "in_pkts": 13, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], 'node2'] ] }}}]} def _test_meter(self, meter_name, expected, fake_port_stats=None): if not fake_port_stats: fake_port_stats = self.fake_port_stats() with mock.patch('ceilometer.network.' 'statistics.opencontrail.' 'client.NetworksAPIClient.' 
'get_vm_statistics', return_value=fake_port_stats) as port_stats: samples = self.driver.get_sample_data(meter_name, self.parse_url, self.params, {}) self.assertEqual(expected, [s for s in samples]) port_stats.assert_called_with('*') def test_switch_port_receive_packets_with_node(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY), (13, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected, self.fake_port_stats_with_node()) def test_switch_port_receive_packets(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets(self): expected = [(5, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.packets', expected) def test_switch_port_receive_bytes(self): expected = [(23, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes(self): expected = [(22, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_receive_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(11, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(10, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.packets', expected) def 
test_switch_port_receive_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(33, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(44, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_transmit_bytes_non_existing_network(self): self.params['virtual_network'] = ['aaa'] self.params['resource'] = ['fip_stats_list'] self._test_meter('switch.port.transmit.bytes', []) ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py0000664000567000056710000000000013072744703031753 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/test_table.py0000664000567000056710000000315113072744703027657 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import table from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestTablePollsters(statistics._PollsterTestBase): def test_table_pollster(self): self._test_pollster( table.TablePollster, 'switch.table', sample.TYPE_GAUGE, 'table') def test_table_pollster_active_entries(self): self._test_pollster( table.TablePollsterActiveEntries, 'switch.table.active.entries', sample.TYPE_GAUGE, 'entry') def test_table_pollster_lookup_packets(self): self._test_pollster( table.TablePollsterLookupPackets, 'switch.table.lookup.packets', sample.TYPE_GAUGE, 'packet') def test_table_pollster_matched_packets(self): self._test_pollster( table.TablePollsterMatchedPackets, 'switch.table.matched.packets', sample.TYPE_GAUGE, 'packet') ceilometer-6.1.5/ceilometer/tests/unit/network/statistics/__init__.py0000664000567000056710000000174413072744706027301 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class _PollsterTestBase(base.BaseTestCase): def _test_pollster(self, pollster_class, meter_name, meter_type, meter_unit): pollster = pollster_class() self.assertEqual(pollster.meter_name, meter_name) self.assertEqual(pollster.meter_type, meter_type) self.assertEqual(pollster.meter_unit, meter_unit) ceilometer-6.1.5/ceilometer/tests/unit/network/services/0000775000567000056710000000000013072745164024612 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/services/test_lbaas_v2.py0000664000567000056710000003210413072744706027715 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas class _BaseTestLBPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBListenerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBListenerPollster, self).setUp() self.pollster = lbaas.LBListenerPollster() self.pollster.lb_version = 'v2' fake_listeners = self.fake_list_listeners() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'list_listener', return_value=fake_listeners)) @staticmethod def fake_list_listeners(): return [{'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'a9729389-6147-41a3-ab22-a24aed8692b2'}], 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'mylistener_online', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ONLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a'}], 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylistener_offline', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'OFFLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_error', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ERROR'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_pending_create', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'PENDING_CREATE'} ] def test_listener_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_listeners()[0][field], samples[0].resource_metadata[field]) def test_listener_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_listener_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(set(['network.services.lb.listener']), set([s.name for s in samples])) def test_listener_discovery(self): discovered_listeners = discovery.LBListenersDiscovery().discover( self.manager) self.assertEqual(4, len(discovered_listeners)) for listener in self.fake_list_listeners(): if listener['operating_status'] == 'pending_create': self.assertNotIn(listener, discovered_listeners) else: self.assertIn(listener, discovered_listeners) class TestLBLoadBalancerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBLoadBalancerPollster, self).setUp() self.pollster = lbaas.LBLoadBalancerPollster() self.pollster.lb_version = 'v2' fake_loadbalancers = self.fake_list_loadbalancers() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'list_loadbalancer', return_value=fake_loadbalancers)) @staticmethod def fake_list_loadbalancers(): return [{'operating_status': 'ONLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'ACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'vip_address': '10.0.0.2', 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_online'}, {'operating_status': 'OFFLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a'}], 'vip_address': '10.0.0.3', 'vip_subnet_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'loadbalancer_offline'}, {'operating_status': 'ERROR', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d8b'}], 'vip_address': '10.0.0.4', 'vip_subnet_id': '213d3059-87a4-45a5-91e9-d721068df0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_error'}, {'operating_status': 'PENDING_CREATE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d4ed7c'}], 'vip_address': '10.0.0.5', 'vip_subnet_id': '123d3059-87a4-45a5-91e9-d721068ae0c3', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395763b2', 'name': 'loadbalancer_pending_create'} ] def test_loadbalancer_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_loadbalancers()[0][field], samples[0].resource_metadata[field]) def test_loadbalancer_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_loadbalancer_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(set(['network.services.lb.loadbalancer']), set([s.name for s in samples])) def test_loadbalancer_discovery(self): discovered_loadbalancers = \ discovery.LBLoadBalancersDiscovery().discover(self.manager) self.assertEqual(4, len(discovered_loadbalancers)) for loadbalancer in self.fake_list_loadbalancers(): if loadbalancer['operating_status'] == 'pending_create': self.assertNotIn(loadbalancer, discovered_loadbalancers) else: self.assertIn(loadbalancer, discovered_loadbalancers) class TestLBStatsPollster(_BaseTestLBPollster): def setUp(self): super(TestLBStatsPollster, self).setUp() fake_balancer_stats = self.fake_balancer_stats() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'get_loadbalancer_stats', return_value=fake_balancer_stats)) fake_loadbalancers = self.fake_list_loadbalancers() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'list_loadbalancer', return_value=fake_loadbalancers)) cfg.CONF.set_override('neutron_lbaas_version', 'v2', group='service_types') @staticmethod def fake_list_loadbalancers(): return [{'operating_status': 'ONLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'ACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'vip_address': '10.0.0.2', 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_online'}, ] @staticmethod def fake_balancer_stats(): return {'active_connections': 2, 'bytes_in': 1, 'bytes_out': 3, 'total_connections': 4} @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, sample_name, expected_volume, expected_type): pollster = factory() cache = {} samples = list(pollster.get_samples(self.manager, cache, self.fake_list_loadbalancers())) self.assertEqual(1, len(samples)) self.assertIsNotNone(samples) self.assertIn('lbstats', cache) self.assertEqual(set([sample_name]), set([s.name for s in samples])) match = [s for s in samples if s.name == sample_name] self.assertEqual(1, len(match), 'missing counter %s' % sample_name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(expected_type, match[0].type) def test_lb_total_connections(self): self._check_get_samples(lbaas.LBTotalConnectionsPollster, 'network.services.lb.total.connections', 4, 'cumulative') def test_lb_active_connections(self): self._check_get_samples(lbaas.LBActiveConnectionsPollster, 'network.services.lb.active.connections', 2, 'gauge') def test_lb_incoming_bytes(self): self._check_get_samples(lbaas.LBBytesInPollster, 'network.services.lb.incoming.bytes', 1, 'gauge') def test_lb_outgoing_bytes(self): self._check_get_samples(lbaas.LBBytesOutPollster, 'network.services.lb.outgoing.bytes', 3, 'gauge') ceilometer-6.1.5/ceilometer/tests/unit/network/services/test_lbaas.py0000664000567000056710000005204213072744706027311 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas class _BaseTestLBPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() cfg.CONF.set_override('neutron_lbaas_version', 'v1', group='service_types') plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. 
return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBPoolPollster(_BaseTestLBPollster): def setUp(self): super(TestLBPoolPollster, self).setUp() self.pollster = lbaas.LBPoolPollster() fake_pools = self.fake_get_pools() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'INACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb02', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'UNKNOWN', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'error', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb_error', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] def test_pool_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_pools()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_pool_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(set(['network.services.lb.pool']), set([s.name for s in samples])) def test_pool_discovery(self): discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) self.assertEqual(4, 
len(discovered_pools)) for pool in self.fake_get_pools(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_pools) else: self.assertIn(pool, discovered_pools) class TestLBVipPollster(_BaseTestLBPollster): def setUp(self): super(TestLBVipPollster, self).setUp() self.pollster = lbaas.LBVipPollster() fake_vips = self.fake_get_vips() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'vip_get_all', return_value=fake_vips)) @staticmethod def fake_get_vips(): return [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, {'status': 'INACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.3', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip02'}, {'status': 'PENDING_CREATE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.4', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'UNKNOWN', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'error', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip_error'}, ] def test_vip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_vips()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) 
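# All samples emitted by one pollster run should share a single meter
# name; collapsing to a set keeps the assertion order-independent.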
self.assertEqual(set(['network.services.lb.vip']), set([s.name for s in samples])) def test_vip_discovery(self): discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) self.assertEqual(4, len(discovered_vips)) for pool in self.fake_get_vips(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_vips) else: self.assertIn(pool, discovered_vips) class TestLBMemberPollster(_BaseTestLBPollster): def setUp(self): super(TestLBMemberPollster, self).setUp() self.pollster = lbaas.LBMemberPollster() fake_members = self.fake_get_members() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'member_get_all', return_value=fake_members)) @staticmethod def fake_get_members(): return [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'INACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.5', 'status_description': None, 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'PENDING_CREATE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'UNKNOWN', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'error', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, ] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_members()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(set(['network.services.lb.member']), set([s.name for s in samples])) def test_members_discovery(self): discovered_members = discovery.LBMembersDiscovery().discover( self.manager) self.assertEqual(4, len(discovered_members)) for pool in self.fake_get_members(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_members) else: self.assertIn(pool, discovered_members) class TestLBHealthProbePollster(_BaseTestLBPollster): def setUp(self): super(TestLBHealthProbePollster, self).setUp() self.pollster = lbaas.LBHealthMonitorPollster() fake_health_monitor = self.fake_get_health_monitor() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'health_monitor_get_all', return_value=fake_health_monitor)) @staticmethod def fake_get_health_monitor(): return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_health_monitor()[0][field], samples[0].resource_metadata[field]) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(set(['network.services.lb.health_monitor']), set([s.name for s in samples])) def test_probes_discovery(self): discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( self.manager) self.assertEqual(discovered_probes, self.fake_get_health_monitor()) class TestLBStatsPollster(_BaseTestLBPollster): def setUp(self): super(TestLBStatsPollster, self).setUp() fake_pool_stats = self.fake_pool_stats() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'pool_stats', return_value=fake_pool_stats)) fake_pools = self.fake_get_pools() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] @staticmethod def fake_pool_stats(): return {'stats': {'active_connections': 2, 'bytes_in': 1, 'bytes_out': 3, 'total_connections': 4 } } @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, sample_name, expected_volume, expected_type): pollster = factory() cache = {} samples = list(pollster.get_samples(self.manager, cache, self.fake_get_pools())) self.assertEqual(1, len(samples)) self.assertIsNotNone(samples) self.assertIn('lbstats', cache) self.assertEqual(set([sample_name]), set([s.name for s in samples])) match = [s for s in samples if s.name == sample_name] self.assertEqual(1, len(match), 'missing counter %s' % sample_name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(expected_type, match[0].type) def test_lb_total_connections(self): self._check_get_samples(lbaas.LBTotalConnectionsPollster, 'network.services.lb.total.connections', 4, 'cumulative') def test_lb_active_connections(self): self._check_get_samples(lbaas.LBActiveConnectionsPollster, 'network.services.lb.active.connections', 2, 'gauge') def test_lb_incoming_bytes(self): self._check_get_samples(lbaas.LBBytesInPollster, 'network.services.lb.incoming.bytes', 1, 'gauge') def test_lb_outgoing_bytes(self): self._check_get_samples(lbaas.LBBytesOutPollster, 'network.services.lb.outgoing.bytes', 3, 'gauge') ceilometer-6.1.5/ceilometer/tests/unit/network/services/test_vpnaas.py0000664000567000056710000001646213072744706027525 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import vpnaas class _BaseTestVPNPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestVPNPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestVPNServicesPollster(_BaseTestVPNPollster): def setUp(self): super(TestVPNServicesPollster, self).setUp() self.pollster = vpnaas.VPNServicesPollster() fake_vpn = self.fake_get_vpn_service() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'vpn_get_all', return_value=fake_vpn)) @staticmethod def fake_get_vpn_service(): return [{'status': 'ACTIVE', 'name': 'myvpn', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'INACTIVE', 'name': 'myvpn', 'description': '', 'admin_state_up': True, 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'PENDING_CREATE', 'name': 'myvpn', 'description': '', 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'error', 'name': 'myvpn', 'description': '', 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': False, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, ] def test_vpn_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_vpn_service()[0][field], samples[0].resource_metadata[field]) def test_vpn_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vpn_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(set(['network.services.vpn']), 
set([s.name for s in samples])) def test_vpn_discovery(self): discovered_vpns = discovery.VPNServicesDiscovery().discover( self.manager) self.assertEqual(3, len(discovered_vpns)) for vpn in self.fake_get_vpn_service(): if vpn['status'] == 'error': self.assertNotIn(vpn, discovered_vpns) else: self.assertIn(vpn, discovered_vpns) class TestIPSecConnectionsPollster(_BaseTestVPNPollster): def setUp(self): super(TestIPSecConnectionsPollster, self).setUp() self.pollster = vpnaas.IPSecConnectionsPollster() fake_conns = self.fake_get_ipsec_connections() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'ipsec_site_connections_get_all', return_value=fake_conns)) @staticmethod def fake_get_ipsec_connections(): return [{'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', 'admin_state_up': True, 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_conns_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_ipsec_connections()[0][field], samples[0].resource_metadata[field]) def test_get_conns_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual(set(['network.services.vpn.connections']), set([s.name for s in samples])) def test_conns_discovery(self): discovered_conns = discovery.IPSecConnectionsDiscovery().discover( self.manager) self.assertEqual(1, len(discovered_conns)) self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) ceilometer-6.1.5/ceilometer/tests/unit/network/services/test_fwaas.py0000664000567000056710000001577113072744706027340 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import fwaas class _BaseTestFWPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestFWPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. 
return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestFirewallPollster(_BaseTestFWPollster): def setUp(self): super(TestFirewallPollster, self).setUp() self.pollster = fwaas.FirewallPollster() fake_fw = self.fake_get_fw_service() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'firewall_get_all', return_value=fake_fw)) @staticmethod def fake_get_fw_service(): return [{'status': 'ACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'INACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'PENDING_CREATE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'error', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, ] def test_fw_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_service()[0][field], samples[0].resource_metadata[field]) def test_firewall_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_firewall_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(set(['network.services.firewall']), set([s.name for s in samples])) def test_firewall_discovery(self): discovered_fws = discovery.FirewallDiscovery().discover(self.manager) self.assertEqual(3, len(discovered_fws)) for fw in self.fake_get_fw_service(): if fw['status'] == 'error': self.assertNotIn(fw, discovered_fws) else: self.assertIn(fw, discovered_fws) class TestFirewallPolicyPollster(_BaseTestFWPollster): def setUp(self): super(TestFirewallPolicyPollster, self).setUp() self.pollster = fwaas.FirewallPolicyPollster() fake_fw_policy = self.fake_get_fw_policy() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'fw_policy_get_all', return_value=fake_fw_policy)) @staticmethod def fake_get_fw_policy(): return [{'name': 'my_fw_policy', 'description': 'fw_policy', 'admin_state_up': True, 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'firewall_rules': [{'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_ip_address': '10.24.4.2'}, {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22'}], 'shared': True, 'audited': True, 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_policy_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_policy()[0][field], samples[0].resource_metadata[field]) def test_get_policy_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual(set(['network.services.firewall.policy']), set([s.name for s in samples])) def test_fw_policy_discovery(self): discovered_policy = discovery.FirewallPolicyDiscovery().discover( self.manager) self.assertEqual(1, len(discovered_policy)) self.assertEqual(self.fake_get_fw_policy(), discovered_policy) ceilometer-6.1.5/ceilometer/tests/unit/network/services/__init__.py0000664000567000056710000000000013072744703026707 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/__init__.py0000664000567000056710000000000013072744703025064 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/unit/network/test_notifications.py0000664000567000056710000017514013072744706027262 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
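#
# The NOTIFICATION_* constants below are captured neutron notification
# payloads; each handler class under test turns one payload into ceilometer
# Sample objects. As a minimal sketch of the pattern every test in this
# module follows (_samples_for is a hypothetical helper, not part of the
# suite):
#
#     def _samples_for(handler_cls, payload):
#         """Collect the samples one handler emits for one notification."""
#         handler = handler_cls(mock.Mock())
#         # process_notification() is a generator, hence the list().
#         return list(handler.process_notification(payload))
#
# For a *.create.end payload this yields two samples per resource: a
# resource sample (e.g. "network") followed by an operation sample
# (e.g. "network.create"); bulk payloads yield one such pair per resource,
# and *.exists payloads yield a single sample.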
"""Tests for ceilometer.network.notifications """ import mock from ceilometer.network import notifications from ceilometer.tests import base as test NOTIFICATION_NETWORK_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'network.create.end', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'network': {u'status': u'ACTIVE', u'subnets': [], u'name': u'abcedf', u'router:external': False, u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'admin_state_up': True, u'shared': False, u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_BULK_NETWORK_CREATE = { '_context_roles': [u'_member_', u'heat_stack_owner', u'admin'], u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', u'_context_read_deleted': u'no', u'event_type': u'network.create.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2014-05-1510: 24: 56.335612', u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_tenant_name': u'admin', u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', u'priority': 'info', u'_context_is_admin': True, u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_timestamp': u'2014-05-1510: 24: 56.285975', u'_context_user': u'7520940056d54cceb25cbce888300bea', u'_context_user_id': u'7520940056d54cceb25cbce888300bea', u'publisher_id': u'network.devstack', u'payload': { u'networks': [{u'status': u'ACTIVE', u'subnets': [], u'name': u'test2', u'provider: physical_network': None, u'admin_state_up': True, u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'provider: network_type': u'local', u'shared': False, u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', u'provider: segmentation_id': None}, {u'status': u'ACTIVE', u'subnets': [], u'name': u'test3', u'provider: physical_network': None, u'admin_state_up': True, u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'provider: network_type': u'local', u'shared': False, u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', u'provider: segmentation_id': None}] } } NOTIFICATION_SUBNET_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'subnet.create.end', u'timestamp': u'2012-09-27 14:11:27.426620', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'subnet': { u'name': u'mysubnet', u'enable_dhcp': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'dns_nameservers': [], u'allocation_pools': [{u'start': u'192.168.42.2', u'end': u'192.168.42.254'}], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.42.1', u'cidr': u'192.168.42.0/24', u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:27.214490', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} NOTIFICATION_BULK_SUBNET_CREATE = { '_context_roles': [u'_member_', u'heat_stack_owner', u'admin'], u'_context_request_id': 
u'req-b77e278a-0cce-4987-9f82-15957b234768', u'_context_read_deleted': u'no', u'event_type': u'subnet.create.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2014-05-1510: 47: 08.133888', u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_tenant_name': u'admin', u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', u'priority': u'info', u'_context_is_admin': True, u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_timestamp': u'2014-05-1510: 47: 07.970043', u'_context_user': u'7520940056d54cceb25cbce888300bea', u'_context_user_id': u'7520940056d54cceb25cbce888300bea', u'publisher_id': u'network.devstack', u'payload': { u'subnets': [{u'name': u'', u'enable_dhcp': True, u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'dns_nameservers': [], u'ipv6_ra_mode': None, u'allocation_pools': [{u'start': u'10.0.4.2', u'end': u'10.0.4.254'}], u'host_routes': [], u'ipv6_address_mode': None, u'ip_version': 4, u'gateway_ip': u'10.0.4.1', u'cidr': u'10.0.4.0/24', u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, {u'name': u'', u'enable_dhcp': True, u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'dns_nameservers': [], u'ipv6_ra_mode': None, u'allocation_pools': [{u'start': u'10.0.5.2', u'end': u'10.0.5.254'}], u'host_routes': [], u'ipv6_address_mode': None, u'ip_version': 4, u'gateway_ip': u'10.0.5.1', u'cidr': u'10.0.5.0/24', u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] } } NOTIFICATION_PORT_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'port.create.end', u'timestamp': u'2012-09-27 14:28:31.536370', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'port': { u'status': u'ACTIVE', u'name': u'', u'admin_state_up': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'device_owner': u'', u'mac_address': u'fa:16:3e:75:0c:49', u'fixed_ips': [{ u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', u'ip_address': u'192.168.42.3'}], u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', u'device_id': u''}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:28:31.438919', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} NOTIFICATION_BULK_PORT_CREATE = { u'_context_roles': [u'_member_', u'SwiftOperator'], u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', u'_context_read_deleted': u'no', u'event_type': u'port.create.end', u'_context_project_name': u'demo', u'timestamp': u'2014-05-0909: 19: 58.317548', u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', u'_context_timestamp': u'2014-05-0909: 19: 58.160011', u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', u'payload': { u'ports': [{u'status': u'DOWN', u'name': u'port--1501135095', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', u'tenant_id': u'133087d90fc149528b501dd8b75ea965', u'binding: vnic_type': u'normal', u'device_owner': u'', u'mac_address': u'fa: 16: 3e: 37: 10: 39', u'fixed_ips': [], u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', u'security_groups': [ u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], u'device_id': u''}, 
{u'status': u'DOWN', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': False, u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', u'tenant_id': u'133087d90fc149528b501dd8b75ea965', u'binding: vnic_type': u'normal', u'device_owner': u'', u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', u'fixed_ips': [], u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', u'security_groups': [ u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], u'device_id': u''}] }, u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', u'_context_is_admin': False, u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', u'_context_tenant_name': u'demo', u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', u'_context_user_name': u'demo', u'publisher_id': u'network.os-ci-test12', u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', u'priority': u'INFO' } NOTIFICATION_PORT_UPDATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'port.update.end', u'timestamp': u'2012-09-27 14:35:09.514052', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'port': { u'status': u'ACTIVE', u'name': u'bonjour', u'admin_state_up': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'device_owner': u'', u'mac_address': u'fa:16:3e:75:0c:49', u'fixed_ips': [{ u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', u'ip_address': u'192.168.42.3'}], u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', u'device_id': u''}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:35:09.447682', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} NOTIFICATION_NETWORK_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'network.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'network': {u'status': u'ACTIVE', u'subnets': [], u'name': u'abcedf', u'router:external': False, u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'admin_state_up': True, u'shared': False, u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_ROUTER_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'router.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'router': {'status': u'ACTIVE', 'external_gateway_info': {'network_id': u'89d55642-4dec-43a4-a617-6cec051393b5'}, 'name': u'router1', 'admin_state_up': True, 'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd', 'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_FLOATINGIP_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': 
u'floatingip.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'floatingip': {'router_id': None, 'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd', 'floating_network_id': u'001400f7-1710-4245-98c3-39ba131cc39a', 'fixed_ip_address': None, 'floating_ip_address': u'172.24.4.227', 'port_id': None, 'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_FLOATINGIP_UPDATE_START = { '_context_roles': [u'_member_', u'admin', u'heat_stack_owner'], '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced', '_context_read_deleted': u'no', 'event_type': u'floatingip.update.start', '_context_user_name': u'admin', '_context_project_name': u'admin', 'timestamp': u'2014-05-3107: 19: 43.463101', '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732', '_context_tenant_name': u'admin', '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732', 'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a', 'priority': 'info', '_context_is_admin': True, '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732', '_context_timestamp': u'2014-05-3107: 19: 43.460767', '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1', '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1', 'publisher_id': u'network.devstack', 'payload': { u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555', u'floatingip': { u'fixed_ip_address': u'172.24.4.227', u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2' } } } NOTIFICATION_POOL_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", "event_type": "pool.create.end", "timestamp": "2014-09-15 17:20:50.687649", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "ce255443233748ce9cc71b480974df28", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "pool": { "status": "ACTIVE", "lb_method": "ROUND_ROBIN", "protocol": "HTTP", "description": "", "health_monitors": [], "members": [], "status_description": None, "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "vip_id": None, "name": "my_pool", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "health_monitors_status": [], "provider": "haproxy"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:20:49.600299", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} NOTIFICATION_VIP_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vip.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": 
"1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vip": { "status": "ACTIVE", "protocol": "HTTP", "description": "", "address": "10.0.0.2", "protocol_port": 80, "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", "status_description": None, "name": "my_vip", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "connection_limit": -1, "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "session_persistence": {"type": "SOURCE_IP"}}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} NOTIFICATION_HEALTH_MONITORS_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": { "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "delay": 10, "max_retries": 10, "timeout": 10, "pools": [], "type": "PING", "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_MEMBERS_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "member.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "member": {"admin_state_up": True, "status": "ACTIVE", "status_description": None, "weight": 1, "address": "10.0.0.3", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "protocol_port": 80, "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_FIREWALL_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": 
"req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall": { "status": "ACTIVE", "name": "my_firewall", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_RULE_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_rule.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_rule": { "protocol": "tcp", "description": "", "source_port": 80, "source_ip_address": '192.168.255.10', "destination_ip_address": '10.10.10.1', "firewall_policy_id": '', "position": None, "destination_port": 80, "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", "name": "rule_01", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "enabled": True, "action": "allow", "ip_version": 4, "shared": False}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_policy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_policy": {"name": "my_policy", "firewall_rules": [], "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "audited": False, "shared": False, "id": "c46a1c15-0496-41c9-beff-9a309a25653e", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", 
"publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_VPNSERVICE_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vpnservice.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", "status": "ACTIVE", "name": "my_vpn", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_IPSEC_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsecpolicy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsecpolicy": {"encapsulation_mode": "tunnel", "encryption_algorithm": "aes-128", "pfs": "group5", "lifetime": { "units": "seconds", "value": 3600}, "name": "my_ipsec_polixy", "transform_protocol": "esp", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IKE_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ikepolicy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ikepolicy": {"encryption_algorithm": "aes-128", "pfs": "group5", "name": "my_ike_policy", "phase1_negotiation_mode": "main", "lifetime": {"units": "seconds", "value": 3600}, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ike_version": "v1", "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", 
"_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IPSEC_SITE_CONN_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsec_site_connection.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsec_site_connection": { "status": "ACTIVE", "psk": "test", "initiator": "bi-directional", "name": "my_ipsec_connection", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], "mtu": 1500, "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "dpd": {"action": "hold", "interval": 30, "timeout": 120}, "route_mode": "static", "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "peer_address": "10.0.0.1", "peer_id": "10.0.0.254", "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_POOL_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", "event_type": "pool.update.end", "timestamp": "2014-09-15 17:20:50.687649", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "ce255443233748ce9cc71b480974df28", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "pool": { "status": "ACTIVE", "lb_method": "ROUND_ROBIN", "protocol": "HTTP", "description": "", "health_monitors": [], "members": [], "status_description": None, "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "vip_id": None, "name": "my_pool", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "health_monitors_status": [], "provider": "haproxy"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:20:49.600299", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} NOTIFICATION_VIP_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vip.update.end", "timestamp": "2014-09-15 17:22:11.323644", 
"_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vip": { "status": "ACTIVE", "protocol": "HTTP", "description": "", "address": "10.0.0.2", "protocol_port": 80, "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", "status_description": None, "name": "my_vip", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "connection_limit": -1, "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "session_persistence": {"type": "SOURCE_IP"}}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} NOTIFICATION_HEALTH_MONITORS_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": { "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "delay": 10, "max_retries": 10, "timeout": 10, "pools": [], "type": "PING", "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_MEMBERS_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "member.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "member": {"admin_state_up": True, "status": "ACTIVE", "status_description": None, "weight": 1, "address": "10.0.0.3", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "protocol_port": 80, "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": 
"65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_FIREWALL_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall": { "status": "ACTIVE", "name": "my_firewall", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_RULE_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_rule.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_rule": { "protocol": "tcp", "description": "", "source_port": 80, "source_ip_address": '192.168.255.10', "destination_ip_address": '10.10.10.1', "firewall_policy_id": '', "position": None, "destination_port": 80, "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", "name": "rule_01", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "enabled": True, "action": "allow", "ip_version": 4, "shared": False}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_policy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_policy": {"name": "my_policy", "firewall_rules": [], "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "audited": False, "shared": False, "id": "c46a1c15-0496-41c9-beff-9a309a25653e", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", 
"_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_VPNSERVICE_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vpnservice.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", "status": "ACTIVE", "name": "my_vpn", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_IPSEC_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsecpolicy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsecpolicy": {"encapsulation_mode": "tunnel", "encryption_algorithm": "aes-128", "pfs": "group5", "lifetime": { "units": "seconds", "value": 3600}, "name": "my_ipsec_polixy", "transform_protocol": "esp", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IKE_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ikepolicy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ikepolicy": {"encryption_algorithm": "aes-128", "pfs": "group5", "name": "my_ike_policy", "phase1_negotiation_mode": "main", "lifetime": {"units": "seconds", "value": 3600}, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ike_version": "v1", "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": 
"no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsec_site_connection.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsec_site_connection": { "status": "ACTIVE", "psk": "test", "initiator": "bi-directional", "name": "my_ipsec_connection", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], "mtu": 1500, "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "dpd": {"action": "hold", "interval": 30, "timeout": 120}, "route_mode": "static", "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "peer_address": "10.0.0.1", "peer_id": "10.0.0.254", "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_EMPTY_PAYLOAD = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": {}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} class TestNotifications(test.BaseTestCase): def test_network_create(self): v = notifications.Network(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.create", samples[1].name) def test_bulk_network_create(self): v = notifications.Network(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_BULK_NETWORK_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("network", samples[0].name) self.assertEqual("network.create", samples[1].name) self.assertEqual("network", samples[2].name) 
self.assertEqual("network.create", samples[3].name) def test_subnet_create(self): v = notifications.Subnet(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("subnet.create", samples[1].name) def test_bulk_subnet_create(self): v = notifications.Subnet(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("subnet", samples[0].name) self.assertEqual("subnet.create", samples[1].name) self.assertEqual("subnet", samples[2].name) self.assertEqual("subnet.create", samples[3].name) def test_port_create(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_PORT_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("port.create", samples[1].name) def test_bulk_port_create(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("port", samples[0].name) self.assertEqual("port.create", samples[1].name) self.assertEqual("port", samples[2].name) self.assertEqual("port.create", samples[3].name) def test_port_update(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("port.update", samples[1].name) def test_network_exists(self): v = notifications.Network(mock.Mock()) samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS) self.assertEqual(1, len(list(samples))) def test_router_exists(self): v = notifications.Router(mock.Mock()) samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS) self.assertEqual(1, len(list(samples))) def test_floatingip_exists(self): v = notifications.FloatingIP(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS)) self.assertEqual(1, len(samples)) self.assertEqual("ip.floating", samples[0].name) def test_floatingip_update(self): v = notifications.FloatingIP(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FLOATINGIP_UPDATE_START)) self.assertEqual(len(samples), 2) self.assertEqual("ip.floating", samples[0].name) def test_pool_create(self): v = notifications.Pool(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_POOL_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.pool", samples[0].name) def test_vip_create(self): v = notifications.Vip(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VIP_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.vip", samples[0].name) def test_member_create(self): v = notifications.Member(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.member", samples[0].name) def test_health_monitor_create(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_HEALTH_MONITORS_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.health_monitor", samples[0].name) def test_firewall_create(self): v = notifications.Firewall(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall", samples[0].name) def test_vpnservice_create(self): v = notifications.VPNService(mock.Mock()) samples = 
list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn", samples[0].name) def test_ipsec_connection_create(self): v = notifications.IPSecSiteConnection(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_SITE_CONN_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.connections", samples[0].name) def test_firewall_policy_create(self): v = notifications.FirewallPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.policy", samples[0].name) def test_firewall_rule_create(self): v = notifications.FirewallRule(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_RULE_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.rule", samples[0].name) def test_ipsec_policy_create(self): v = notifications.IPSecPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) def test_ike_policy_create(self): v = notifications.IKEPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IKE_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) def test_pool_update(self): v = notifications.Pool(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.pool", samples[0].name) def test_vip_update(self): v = notifications.Vip(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.vip", samples[0].name) def test_member_update(self): v = notifications.Member(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.member", samples[0].name) def test_health_monitor_update(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_HEALTH_MONITORS_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.health_monitor", samples[0].name) def test_firewall_update(self): v = notifications.Firewall(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall", samples[0].name) def test_vpnservice_update(self): v = notifications.VPNService(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn", samples[0].name) def test_ipsec_connection_update(self): v = notifications.IPSecSiteConnection(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_SITE_CONN_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.connections", samples[0].name) def test_firewall_policy_update(self): v = notifications.FirewallPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.policy", samples[0].name) def test_firewall_rule_update(self): v = notifications.FirewallRule(mock.Mock()) samples = list(v.process_notification( 
NOTIFICATION_FIREWALL_RULE_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.rule", samples[0].name) def test_ipsec_policy_update(self): v = notifications.IPSecPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) def test_ike_policy_update(self): v = notifications.IKEPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IKE_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) def test_empty_event_payload(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_EMPTY_PAYLOAD)) self.assertEqual(0, len(samples)) class TestEventTypes(test.BaseTestCase): def test_network(self): v = notifications.Network(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_subnet(self): v = notifications.Subnet(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_port(self): v = notifications.Port(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_router(self): self.assertTrue(notifications.Router(mock.Mock()).event_types) def test_floatingip(self): self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types) def test_pool(self): self.assertTrue(notifications.Pool(mock.Mock()).event_types) def test_vip(self): self.assertTrue(notifications.Vip(mock.Mock()).event_types) def test_member(self): self.assertTrue(notifications.Member(mock.Mock()).event_types) def test_health_monitor(self): self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types) def test_firewall(self): self.assertTrue(notifications.Firewall(mock.Mock()).event_types) def test_vpnservice(self): self.assertTrue(notifications.VPNService(mock.Mock()).event_types) def test_ipsec_connection(self): self.assertTrue(notifications.IPSecSiteConnection( mock.Mock()).event_types) def test_firewall_policy(self): self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types) def test_firewall_rule(self): self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types) def test_ipsec_policy(self): self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types) def test_ike_policy(self): self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types) ceilometer-6.1.5/ceilometer/tests/unit/__init__.py0000664000567000056710000000000013072744703023373 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/tests/__init__.py0000664000567000056710000000000013072744703022414 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/collector.py0000664000567000056710000001526113072744705021522 0ustar jenkinsjenkins00000000000000# # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
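# The [collector] options registered below control how incoming samples and
# events are batched before being handed to the dispatchers. A minimal tuning
# sketch for ceilometer.conf (values illustrative, not recommendations):
#
#     [collector]
#     batch_size = 50
#     batch_timeout = 5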
from itertools import chain import socket import msgpack from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_service import service as os_service from oslo_utils import netutils from oslo_utils import units from ceilometer import dispatcher from ceilometer.i18n import _, _LE from ceilometer import messaging from ceilometer import utils OPTS = [ cfg.StrOpt('udp_address', default='0.0.0.0', help='Address to which the UDP socket is bound. Set to ' 'an empty string to disable.'), cfg.PortOpt('udp_port', default=4952, help='Port to which the UDP socket is bound.'), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'dispatching them.'), cfg.IntOpt('batch_timeout', default=None, help='Number of seconds to wait before dispatching samples ' 'when batch_size is not reached (None means indefinitely)'), ] cfg.CONF.register_opts(OPTS, group="collector") cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_opt('store_events', 'ceilometer.notification', group='notification') LOG = log.getLogger(__name__) class CollectorService(os_service.Service): """Listener for the collector service.""" def start(self): """Bind the UDP socket and handle incoming data.""" # ensure dispatcher is configured before starting other services dispatcher_managers = dispatcher.load_dispatcher_manager() (self.meter_manager, self.event_manager) = dispatcher_managers self.sample_listener = None self.event_listener = None super(CollectorService, self).start() if cfg.CONF.collector.udp_address: self.tg.add_thread(self.start_udp) transport = messaging.get_transport(optional=True) if transport: if list(self.meter_manager): sample_target = oslo_messaging.Target( topic=cfg.CONF.publisher_notifier.metering_topic) self.sample_listener = ( messaging.get_batch_notification_listener( transport, [sample_target], [SampleEndpoint(self.meter_manager)], allow_requeue=True, batch_size=cfg.CONF.collector.batch_size, batch_timeout=cfg.CONF.collector.batch_timeout)) self.sample_listener.start() if cfg.CONF.notification.store_events and list(self.event_manager): event_target = oslo_messaging.Target( topic=cfg.CONF.publisher_notifier.event_topic) self.event_listener = ( messaging.get_batch_notification_listener( transport, [event_target], [EventEndpoint(self.event_manager)], allow_requeue=True, batch_size=cfg.CONF.collector.batch_size, batch_timeout=cfg.CONF.collector.batch_timeout)) self.event_listener.start() if not cfg.CONF.collector.udp_address: # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) def start_udp(self): address_family = socket.AF_INET if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address): address_family = socket.AF_INET6 udp = socket.socket(address_family, socket.SOCK_DGRAM) udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) udp.bind((cfg.CONF.collector.udp_address, cfg.CONF.collector.udp_port)) self.udp_run = True while self.udp_run: # NOTE(jd) Arbitrary limit of 64K because that ought to be # enough for anybody.
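            # The expected wire format mirrors ceilometer.publisher.udp:
            # each datagram carries a single msgpack-encoded sample dict.
            # A sender sketch (socket and address names illustrative):
            #
            #     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            #     sock.sendto(msgpack.dumps(sample_dict),
            #                 (udp_address, udp_port))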
data, source = udp.recvfrom(64 * units.Ki) try: sample = msgpack.loads(data, encoding='utf-8') except Exception: LOG.warning(_("UDP: Cannot decode data sent by %s"), source) else: try: LOG.debug("UDP: Storing %s", sample) self.meter_manager.map_method('record_metering_data', sample) except Exception: LOG.exception(_("UDP: Unable to store meter")) def stop(self): self.udp_run = False if self.sample_listener: utils.kill_listeners([self.sample_listener]) if self.event_listener: utils.kill_listeners([self.event_listener]) super(CollectorService, self).stop() def record_metering_data(self, context, data): """RPC endpoint for messages we send to ourselves. When the notification messages are re-published through the RPC publisher, this method receives them for processing. """ self.meter_manager.map_method('record_metering_data', data=data) class CollectorEndpoint(object): def __init__(self, dispatcher_manager): self.dispatcher_manager = dispatcher_manager def sample(self, messages): """RPC endpoint for notification messages When another service sends a notification over the message bus, this method receives it. """ samples = list(chain.from_iterable(m["payload"] for m in messages)) try: self.dispatcher_manager.map_method(self.method, samples) except Exception: LOG.exception(_LE("Dispatcher failed to handle the %s, " "requeue it."), self.ep_type) return oslo_messaging.NotificationResult.REQUEUE class SampleEndpoint(CollectorEndpoint): method = 'record_metering_data' ep_type = 'sample' class EventEndpoint(CollectorEndpoint): method = 'record_events' ep_type = 'event' ceilometer-6.1.5/ceilometer/dispatcher/0000775000567000056710000000000013072745164021303 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/dispatcher/database.py0000664000567000056710000001215113072744705023421 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils from ceilometer import dispatcher from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LW from ceilometer.publisher import utils as publisher_utils from ceilometer import storage LOG = log.getLogger(__name__) class DatabaseDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data into database. The dispatcher class which records each meter into a database configured in ceilometer configuration file. 
To enable this dispatcher, the following section needs to be present in the ceilometer.conf file [DEFAULT] meter_dispatchers = database event_dispatchers = database """ def __init__(self, conf): super(DatabaseDispatcher, self).__init__(conf) self._meter_conn = self._get_db_conn('metering', True) self._event_conn = self._get_db_conn('event', True) def _get_db_conn(self, purpose, ignore_exception=False): try: return storage.get_connection_from_config(self.conf, purpose) except Exception as err: params = {"purpose": purpose, "err": err} LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s, " "will retry later: %(err)s") % params) if not ignore_exception: raise @property def meter_conn(self): if not self._meter_conn: self._meter_conn = self._get_db_conn('metering') return self._meter_conn @property def event_conn(self): if not self._event_conn: self._event_conn = self._get_db_conn('event') return self._event_conn def record_metering_data(self, data): # We may have received only one counter on the wire if not isinstance(data, list): data = [data] for meter in data: LOG.debug( 'metering data %(counter_name)s ' 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', {'counter_name': meter['counter_name'], 'resource_id': meter['resource_id'], 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), 'counter_volume': meter['counter_volume']}) if publisher_utils.verify_signature( meter, self.conf.publisher.telemetry_secret): try: # Convert the timestamp to a datetime instance. # Storage engines are responsible for converting # that value to something they can store. if meter.get('timestamp'): ts = timeutils.parse_isotime(meter['timestamp']) meter['timestamp'] = timeutils.normalize_time(ts) self.meter_conn.record_metering_data(meter) except Exception as err: LOG.exception(_LE('Failed to record metering data: %s'), err) # raise the exception to propagate it up in the chain. raise else: LOG.warning(_LW( 'message signature invalid, discarding message: %r'), meter) def record_events(self, events): if not isinstance(events, list): events = [events] event_list = [] for ev in events: if publisher_utils.verify_signature( ev, self.conf.publisher.telemetry_secret): try: event_list.append( models.Event( message_id=ev['message_id'], event_type=ev['event_type'], generated=timeutils.normalize_time( timeutils.parse_isotime(ev['generated'])), traits=[models.Trait( name, dtype, models.Trait.convert_value(dtype, value)) for name, dtype, value in ev['traits']], raw=ev.get('raw', {})) ) except Exception: LOG.exception(_LE("Error processing event and it will be " "dropped: %s"), ev) else: LOG.warning(_LW( 'event signature invalid, discarding event: %s'), ev) self.event_conn.record_events(event_list) ceilometer-6.1.5/ceilometer/dispatcher/gnocchi.py0000664000567000056710000004055713072744705023276 0ustar jenkinsjenkins00000000000000# # Copyright 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
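# Samples are mapped to Gnocchi resources and metrics by the definitions
# loaded from resources_definition_file (see ResourcesDefinition below). A
# minimal entry sketch (field names follow MANDATORY_FIELDS; the resource
# type, metric names and attribute path are illustrative):
#
#     resources:
#       - resource_type: instance
#         metrics:
#           - 'cpu'
#           - 'memory'
#         attributes:
#           host: resource_metadata.host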
from collections import defaultdict from hashlib import md5 import itertools import operator import threading import uuid from gnocchiclient import client from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import session as ka_session from oslo_config import cfg from oslo_log import log import requests import retrying import six from stevedore import extension from ceilometer import declarative from ceilometer import dispatcher from ceilometer.i18n import _, _LE, _LW from ceilometer import keystone_client from ceilometer import utils NAME_ENCODED = __name__.encode('utf-8') CACHE_NAMESPACE = uuid.UUID(bytes=md5(NAME_ENCODED).digest()) LOG = log.getLogger(__name__) dispatcher_opts = [ cfg.BoolOpt('filter_service_activity', default=True, help='Filter out samples generated by Gnocchi ' 'service activity'), cfg.StrOpt('filter_project', default='gnocchi', help='Gnocchi project used to filter out samples ' 'generated by Gnocchi service activity'), cfg.StrOpt('url', deprecated_for_removal=True, help='URL to Gnocchi. default: autodetection'), cfg.StrOpt('archive_policy', help='The archive policy to use when the dispatcher ' 'create a new metric.'), cfg.StrOpt('resources_definition_file', default='gnocchi_resources.yaml', help=_('The Yaml file that defines mapping between samples ' 'and gnocchi resources/metrics')), ] cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi") def cache_key_mangler(key): """Construct an opaque cache key.""" if six.PY2: key = key.encode('utf-8') return uuid.uuid5(CACHE_NAMESPACE, key).hex def log_and_ignore_unexpected_workflow_error(func): def log_and_ignore(self, *args, **kwargs): try: func(self, *args, **kwargs) except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: LOG.error(six.text_type(e), exc_info=True) return log_and_ignore class ResourcesDefinitionException(Exception): def __init__(self, message, definition_cfg): msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message) super(ResourcesDefinitionException, self).__init__(msg) class ResourcesDefinition(object): MANDATORY_FIELDS = {'resource_type': six.string_types, 'metrics': list} def __init__(self, definition_cfg, default_archive_policy, plugin_manager): self._default_archive_policy = default_archive_policy self.cfg = definition_cfg for field, field_type in self.MANDATORY_FIELDS.items(): if field not in self.cfg: raise declarative.DefinitionException( _LE("Required field %s not specified") % field, self.cfg) if not isinstance(self.cfg[field], field_type): raise declarative.DefinitionException( _LE("Required field %(field)s should be a %(type)s") % {'field': field, 'type': field_type}, self.cfg) self._attributes = {} for name, attr_cfg in self.cfg.get('attributes', {}).items(): self._attributes[name] = declarative.Definition(name, attr_cfg, plugin_manager) self.metrics = {} for t in self.cfg['metrics']: archive_policy = self.cfg.get('archive_policy', self._default_archive_policy) if archive_policy is None: self.metrics[t] = {} else: self.metrics[t] = dict(archive_policy_name=archive_policy) def match(self, metric_name): for t in self.cfg['metrics']: if utils.match(metric_name, t): return True return False def attributes(self, sample): attrs = {} for name, definition in self._attributes.items(): value = definition.parse(sample) if value is not None: attrs[name] = value return attrs def get_gnocchiclient(conf): requests_session = requests.session() for scheme in requests_session.adapters.keys(): requests_session.mount(scheme, 
ka_session.TCPKeepAliveAdapter( pool_block=True)) session = keystone_client.get_session(requests_session=requests_session) return client.Client('1', session, interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.dispatcher_gnocchi.url) class LockedDefaultDict(defaultdict): """defaultdict with lock to handle threading Dictionary only deletes if nothing is accessing dict and nothing is holding lock to be deleted. If both cases are not true, it will skip delete. """ def __init__(self, *args, **kwargs): self.lock = threading.Lock() super(LockedDefaultDict, self).__init__(*args, **kwargs) def __getitem__(self, key): with self.lock: return super(LockedDefaultDict, self).__getitem__(key) def pop(self, key, *args): with self.lock: key_lock = super(LockedDefaultDict, self).__getitem__(key) if key_lock.acquire(False): try: super(LockedDefaultDict, self).pop(key, *args) finally: key_lock.release() class GnocchiDispatcher(dispatcher.MeterDispatcherBase): """Dispatcher class for recording metering data into database. The dispatcher class records each meter into the gnocchi service configured in ceilometer configuration file. An example configuration may look like the following: [dispatcher_gnocchi] url = http://localhost:8041 archive_policy = low To enable this dispatcher, the following section needs to be present in ceilometer.conf file [DEFAULT] meter_dispatchers = gnocchi """ def __init__(self, conf): super(GnocchiDispatcher, self).__init__(conf) self.conf = conf self.filter_service_activity = ( conf.dispatcher_gnocchi.filter_service_activity) self._ks_client = keystone_client.get_client() self.resources_definition = self._load_resources_definitions(conf) self.cache = None try: import oslo_cache oslo_cache.configure(self.conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. 
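            # A sketch of what explicitly enabling a real backend looks
            # like in ceilometer.conf (the backend choice is illustrative):
            #
            #     [cache]
            #     enabled = True
            #     backend = dogpile.cache.memory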
if 'null' not in self.conf.cache.backend: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( self.conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass except oslo_cache.exception.ConfigurationError as exc: LOG.warning(_LW('unable to configure oslo_cache: %s') % exc) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) self._gnocchi = get_gnocchiclient(conf) # Convert retry_interval secs to msecs for retry decorator retries = conf.storage.max_retries @retrying.retry(wait_fixed=conf.storage.retry_interval * 1000, stop_max_attempt_number=(retries if retries >= 0 else None)) def _get_connection(): self._gnocchi.capabilities.list() try: _get_connection() except Exception: LOG.error(_LE('Failed to connect to Gnocchi.')) raise @classmethod def _load_resources_definitions(cls, conf): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( {}, conf.dispatcher_gnocchi.resources_definition_file) resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, conf.dispatcher_gnocchi.archive_policy, plugin_manager)) except Exception as exc: LOG.error(_LE("Failed to load resource due to error %s") % exc) return resource_defs @property def gnocchi_project_id(self): if self._gnocchi_project_id is not None: return self._gnocchi_project_id with self._gnocchi_project_id_lock: if self._gnocchi_project_id is None: try: project = self._ks_client.projects.find( name=self.conf.dispatcher_gnocchi.filter_project) except Exception: LOG.exception('fail to retrieve user of Gnocchi service') raise self._gnocchi_project_id = project.id LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) return self._gnocchi_project_id def _is_swift_account_sample(self, sample): return bool([rd for rd in self.resources_definition if rd.cfg['resource_type'] == 'swift_account' and rd.match(sample['counter_name'])]) def _is_gnocchi_activity(self, sample): return (self.filter_service_activity and ( # avoid anything from the user used by gnocchi sample['project_id'] == self.gnocchi_project_id or # avoid anything in the swift account used by gnocchi (sample['resource_id'] == self.gnocchi_project_id and self._is_swift_account_sample(sample)) )) def _get_resource_definition(self, metric_name): for rd in self.resources_definition: if rd.match(metric_name): return rd def record_metering_data(self, data): # We may have receive only one counter on the wire if not isinstance(data, list): data = [data] # NOTE(sileht): skip sample generated by gnocchi itself data = [s for s in data if not self._is_gnocchi_activity(s)] # FIXME(sileht): This method bulk the processing of samples # grouped by resource_id and metric_name but this is not # efficient yet because the data received here doesn't often # contains a lot of different kind of samples # So perhaps the next step will be to pool the received data from # message bus. 
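        # For example (sketch), two resources with mixed counters are
        # consumed by the grouping below as:
        #     resource-1 -> {'cpu': [s1, s2], 'memory.usage': [s3]}
        #     resource-2 -> {'cpu': [s4]}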
data.sort(key=lambda s: (s['resource_id'], s['counter_name'])) resource_grouped_samples = itertools.groupby( data, key=operator.itemgetter('resource_id')) for resource_id, samples_of_resource in resource_grouped_samples: metric_grouped_samples = itertools.groupby( list(samples_of_resource), key=operator.itemgetter('counter_name')) self._process_resource(resource_id, metric_grouped_samples) @log_and_ignore_unexpected_workflow_error def _process_resource(self, resource_id, metric_grouped_samples): resource_extra = {} for metric_name, samples in metric_grouped_samples: samples = list(samples) rd = self._get_resource_definition(metric_name) if rd is None: LOG.warning("metric %s is not handled by gnocchi" % metric_name) continue if rd.cfg.get("ignore"): continue resource_type = rd.cfg['resource_type'] resource = { "id": resource_id, "user_id": samples[0]['user_id'], "project_id": samples[0]['project_id'], "metrics": rd.metrics, } measures = [] for sample in samples: resource_extra.update(rd.attributes(sample)) measures.append({'timestamp': sample['timestamp'], 'value': sample['counter_volume']}) resource.update(resource_extra) retry = True try: self._gnocchi.metric.add_measures(metric_name, measures, resource_id) except gnocchi_exc.ResourceNotFound: self._if_not_cached("create", resource_type, resource, self._create_resource) except gnocchi_exc.MetricNotFound: metric = {'resource_id': resource['id'], 'name': metric_name} metric.update(rd.metrics[metric_name]) try: self._gnocchi.metric.create(metric) except gnocchi_exc.NamedMetricAlreadyExists: # NOTE(sileht): metric created in the meantime pass else: retry = False if retry: self._gnocchi.metric.add_measures(metric_name, measures, resource_id) LOG.debug("Measure posted on metric %s of resource %s", metric_name, resource_id) if resource_extra: self._if_not_cached("update", resource_type, resource, self._update_resource, resource_extra) def _create_resource(self, resource_type, resource): try: self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass def _update_resource(self, resource_type, resource, resource_extra): self._gnocchi.resource.update(resource_type, resource["id"], resource_extra) LOG.debug('Resource %s updated', resource["id"]) def _if_not_cached(self, operation, resource_type, resource, method, *args, **kwargs): if self.cache: cache_key = resource['id'] attribute_hash = self._check_resource_cache(cache_key, resource) if attribute_hash: with self._gnocchi_resource_lock[cache_key]: # NOTE(luogangyi): there is a possibility that the # resource was already built in cache by another # ceilometer-collector when we get the lock here. 
attribute_hash = self._check_resource_cache(cache_key, resource) if attribute_hash: method(resource_type, resource, *args, **kwargs) self.cache.set(cache_key, attribute_hash) else: LOG.debug('resource cache recheck hit for ' '%s %s', operation, cache_key) self._gnocchi_resource_lock.pop(cache_key, None) else: LOG.debug('Resource cache hit for %s %s', operation, cache_key) else: method(resource_type, resource, *args, **kwargs) def _check_resource_cache(self, key, resource_data): cached_hash = self.cache.get(key) attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics", resource_data.items()))) if not cached_hash or cached_hash != attribute_hash: return attribute_hash else: return None ceilometer-6.1.5/ceilometer/dispatcher/file.py0000664000567000056710000000541613072744705022602 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers from oslo_config import cfg from ceilometer import dispatcher OPTS = [ cfg.StrOpt('file_path', help='Name and the location of the file to record ' 'meters.'), cfg.IntOpt('max_bytes', default=0, help='The max size of the file.'), cfg.IntOpt('backup_count', default=0, help='The max number of the files to keep.'), ] cfg.CONF.register_opts(OPTS, group="dispatcher_file") class FileDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data to a file. The dispatcher class which logs each meter and/or event into a file configured in ceilometer configuration file. An example configuration may look like the following: [dispatcher_file] file_path = /tmp/meters To enable this dispatcher, the following section needs to be present in ceilometer.conf file [DEFAULT] meter_dispatchers = file event_dispatchers = file """ def __init__(self, conf): super(FileDispatcher, self).__init__(conf) self.log = None # if the directory and path are configured, then log to the file if self.conf.dispatcher_file.file_path: dispatcher_logger = logging.Logger('dispatcher.file') dispatcher_logger.setLevel(logging.INFO) # create rotating file handler which logs meters rfh = logging.handlers.RotatingFileHandler( self.conf.dispatcher_file.file_path, maxBytes=self.conf.dispatcher_file.max_bytes, backupCount=self.conf.dispatcher_file.backup_count, encoding='utf8') rfh.setLevel(logging.INFO) # Only wanted the meters to be saved in the file, not the # project root logger. dispatcher_logger.propagate = False dispatcher_logger.addHandler(rfh) self.log = dispatcher_logger def record_metering_data(self, data): if self.log: self.log.info(data) def record_events(self, events): if self.log: self.log.info(events) ceilometer-6.1.5/ceilometer/dispatcher/http.py0000664000567000056710000001215613072744705022641 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_config import cfg from oslo_log import log import requests from ceilometer import dispatcher from ceilometer.i18n import _, _LE, _LW from ceilometer.publisher import utils as publisher_utils LOG = log.getLogger(__name__) http_dispatcher_opts = [ cfg.StrOpt('target', default='', help='The target where the http request will be sent. ' 'If this is not set, no data will be posted. For ' 'example: target = http://hostname:1234/path'), cfg.StrOpt('event_target', help='The target for event data where the http request ' 'will be sent to. If this is not set, it will default ' 'to the same as the sample target.'), cfg.IntOpt('timeout', default=5, help='The max time in seconds to wait for a request to ' 'timeout.'), ] cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http") class HttpDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for posting metering/event data into a http target. To enable this dispatcher, the following option needs to be present in the ceilometer.conf file:: [DEFAULT] meter_dispatchers = http event_dispatchers = http Dispatcher specific options can be added as follows:: [dispatcher_http] target = www.example.com event_target = www.example.com timeout = 2 """ def __init__(self, conf): super(HttpDispatcher, self).__init__(conf) self.headers = {'Content-type': 'application/json'} self.timeout = self.conf.dispatcher_http.timeout self.target = self.conf.dispatcher_http.target self.event_target = (self.conf.dispatcher_http.event_target or self.target) def record_metering_data(self, data): if self.target == '': # if the target was not set, do not do anything LOG.error(_('Dispatcher target was not set, no meter will ' 'be posted. Set the target in the ceilometer.conf ' 'file')) return # We may have received only one counter on the wire if not isinstance(data, list): data = [data] for meter in data: LOG.debug( 'metering data %(counter_name)s ' 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', {'counter_name': meter['counter_name'], 'resource_id': meter['resource_id'], 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), 'counter_volume': meter['counter_volume']}) if publisher_utils.verify_signature( meter, self.conf.publisher.telemetry_secret): try: # Every meter should be posted to the target res = requests.post(self.target, data=json.dumps(meter), headers=self.headers, timeout=self.timeout) LOG.debug('Message posting finished with status code ' '%d.', res.status_code) except Exception as err: LOG.exception(_('Failed to record metering data: %s'), err) else: LOG.warning(_( 'message signature invalid, discarding message: %r'), meter) def record_events(self, events): if not isinstance(events, list): events = [events] for event in events: if publisher_utils.verify_signature( event, self.conf.publisher.telemetry_secret): res = None try: # Encode the event as JSON, matching the Content-type # header set above. res = requests.post(self.event_target, data=json.dumps(event), headers=self.headers, timeout=self.timeout) res.raise_for_status() except Exception: error_code = res.status_code if res else 'unknown' LOG.exception(_LE('Status Code: %(code)s. Failed to ' 'dispatch event: %(event)s'), {'code': error_code, 'event': event}) else: LOG.warning(_LW( 'event signature invalid, discarding event: %s'), event) ceilometer-6.1.5/ceilometer/dispatcher/__init__.py0000664000567000056710000000557313072744705023426 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import six from stevedore import named from ceilometer.i18n import _LW LOG = log.getLogger(__name__) OPTS = [ cfg.MultiStrOpt('meter_dispatchers', deprecated_name='dispatcher', default=['database'], help='Dispatchers to process metering data.'), cfg.MultiStrOpt('event_dispatchers', default=['database'], deprecated_name='dispatcher', help='Dispatchers to process event data.'), ] cfg.CONF.register_opts(OPTS) STORAGE_OPTS = [ cfg.IntOpt('max_retries', default=10, deprecated_group='database', help='Maximum number of connection retries during startup. ' 'Set to -1 to specify an infinite retry count.'), cfg.IntOpt('retry_interval', default=10, deprecated_group='database', help='Interval (in seconds) between retries of connection.'), ] cfg.CONF.register_opts(STORAGE_OPTS, group='storage') def _load_dispatcher_manager(dispatcher_type): namespace = 'ceilometer.dispatcher.%s' % dispatcher_type conf_name = '%s_dispatchers' % dispatcher_type LOG.debug('loading dispatchers from %s', namespace) # set propagate_map_exceptions to True to enable stevedore # to propagate exceptions. dispatcher_manager = named.NamedExtensionManager( namespace=namespace, names=getattr(cfg.CONF, conf_name), invoke_on_load=True, invoke_args=[cfg.CONF], propagate_map_exceptions=True) if not list(dispatcher_manager): LOG.warning(_LW('Failed to load any dispatchers for %s'), namespace) return dispatcher_manager def load_dispatcher_manager(): return (_load_dispatcher_manager('meter'), _load_dispatcher_manager('event')) class Base(object): def __init__(self, conf): self.conf = conf @six.add_metaclass(abc.ABCMeta) class MeterDispatcherBase(Base): @abc.abstractmethod def record_metering_data(self, data): """Recording metering data interface.""" @six.add_metaclass(abc.ABCMeta) class EventDispatcherBase(Base): @abc.abstractmethod def record_events(self, events): """Recording events interface.""" ceilometer-6.1.5/ceilometer/publisher/0000775000567000056710000000000013072745164021152 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/publisher/test.py0000664000567000056710000000271013072744706022504 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample in memory, useful for testing """ from ceilometer import publisher class TestPublisher(publisher.PublisherBase): """Publisher used in unit testing.""" def __init__(self, parsed_url): self.samples = [] self.events = [] self.calls = 0 def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ self.samples.extend(samples) self.calls += 1 def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ self.events.extend(events) self.calls += 1 ceilometer-6.1.5/ceilometer/publisher/file.py0000664000567000056710000000744513072744706022456 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers from oslo_log import log from six.moves.urllib import parse as urlparse import ceilometer from ceilometer.i18n import _ from ceilometer import publisher LOG = log.getLogger(__name__) class FilePublisher(publisher.PublisherBase): """Publisher metering data to file. The publisher which records metering data into a file. The file name and location should be configured in ceilometer pipeline configuration file. If a file name and location is not specified, this File Publisher will not log any meters other than log a warning in Ceilometer log file. To enable this publisher, add the following section to the /etc/ceilometer/publisher.yaml file or simply add it to an existing pipeline:: - name: meter_file interval: 600 counters: - "*" transformers: publishers: - file:///var/test?max_bytes=10000000&backup_count=5 File path is required for this publisher to work properly. If max_bytes or backup_count is missing, FileHandler will be used to save the metering data. If max_bytes and backup_count are present, RotatingFileHandler will be used to save the metering data. 
""" def __init__(self, parsed_url): super(FilePublisher, self).__init__(parsed_url) self.publisher_logger = None path = parsed_url.path if not path or path.lower() == 'file': LOG.error(_('The path for the file publisher is required')) return rfh = None max_bytes = 0 backup_count = 0 # Handling other configuration options in the query string if parsed_url.query: params = urlparse.parse_qs(parsed_url.query) if params.get('max_bytes') and params.get('backup_count'): try: max_bytes = int(params.get('max_bytes')[0]) backup_count = int(params.get('backup_count')[0]) except ValueError: LOG.error(_('max_bytes and backup_count should be ' 'numbers.')) return # create rotating file handler rfh = logging.handlers.RotatingFileHandler( path, encoding='utf8', maxBytes=max_bytes, backupCount=backup_count) self.publisher_logger = logging.Logger('publisher.file') self.publisher_logger.propagate = False self.publisher_logger.setLevel(logging.INFO) rfh.setLevel(logging.INFO) self.publisher_logger.addHandler(rfh) def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ if self.publisher_logger: for sample in samples: self.publisher_logger.info(sample.as_dict()) def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ceilometer-6.1.5/ceilometer/publisher/direct.py0000664000567000056710000000410213072744706022774 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import timeutils from ceilometer.dispatcher import database from ceilometer import publisher from ceilometer.publisher import utils class DirectPublisher(publisher.PublisherBase): """A publisher that allows saving directly from the pipeline. Samples are saved to the currently configured database by hitching a ride on the DatabaseDispatcher. This is useful where it is desirable to limit the number of external services that are required. 
""" def __init__(self, parsed_url): super(DirectPublisher, self).__init__(parsed_url) dispatcher = database.DatabaseDispatcher(cfg.CONF) self.meter_conn = dispatcher.meter_conn self.event_conn = dispatcher.event_conn def publish_samples(self, context, samples): if not isinstance(samples, list): samples = [samples] # Transform the Sample objects into a list of dicts meters = [ utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) for sample in samples ] for meter in meters: if meter.get('timestamp'): ts = timeutils.parse_isotime(meter['timestamp']) meter['timestamp'] = timeutils.normalize_time(ts) self.meter_conn.record_metering_data(meter) def publish_events(self, context, events): if not isinstance(events, list): events = [events] self.event_conn.record_events(events) ceilometer-6.1.5/ceilometer/publisher/udp.py0000664000567000056710000000514213072744706022317 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using an UDP mechanism """ import socket import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import ceilometer from ceilometer.i18n import _ from ceilometer import publisher from ceilometer.publisher import utils cfg.CONF.import_opt('udp_port', 'ceilometer.collector', group='collector') LOG = log.getLogger(__name__) class UDPPublisher(publisher.PublisherBase): def __init__(self, parsed_url): self.host, self.port = netutils.parse_host_port( parsed_url.netloc, default_port=cfg.CONF.collector.udp_port) if netutils.is_valid_ipv6(self.host): addr_family = socket.AF_INET6 else: addr_family = socket.AF_INET self.socket = socket.socket(addr_family, socket.SOCK_DGRAM) def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ for sample in samples: msg = utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) host = self.host port = self.port LOG.debug("Publishing sample %(msg)s over UDP to " "%(host)s:%(port)d", {'msg': msg, 'host': host, 'port': port}) try: self.socket.sendto(msgpack.dumps(msg), (self.host, self.port)) except Exception as e: LOG.warning(_("Unable to send sample over UDP")) LOG.exception(e) def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ceilometer-6.1.5/ceilometer/publisher/messaging.py0000664000567000056710000001771313072744706023513 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using the preferred RPC mechanism. """ import abc import itertools import operator from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import encodeutils from oslo_utils import excutils import six import six.moves.urllib.parse as urlparse from ceilometer.i18n import _, _LE, _LI from ceilometer import messaging from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) NOTIFIER_OPTS = [ cfg.StrOpt('metering_topic', default='metering', help='The topic that ceilometer uses for metering ' 'notifications.', ), cfg.StrOpt('event_topic', default='event', help='The topic that ceilometer uses for event ' 'notifications.', ), cfg.StrOpt('telemetry_driver', default='messagingv2', help='The driver that ceilometer uses for metering ' 'notifications.', deprecated_name='metering_driver', ) ] cfg.CONF.register_opts(NOTIFIER_OPTS, group="publisher_notifier") cfg.CONF.import_opt('host', 'ceilometer.service') class DeliveryFailure(Exception): def __init__(self, message=None, cause=None): super(DeliveryFailure, self).__init__(message) self.cause = cause def raise_delivery_failure(exc): excutils.raise_with_cause(DeliveryFailure, encodeutils.exception_to_unicode(exc), cause=exc) @six.add_metaclass(abc.ABCMeta) class MessagingPublisher(publisher.PublisherBase): def __init__(self, parsed_url): options = urlparse.parse_qs(parsed_url.query) # the value of options is a list of url param values # only take care of the latest one if the option # is provided more than once self.per_meter_topic = bool(int( options.get('per_meter_topic', [0])[-1])) self.policy = options.get('policy', ['default'])[-1] self.max_queue_length = int(options.get( 'max_queue_length', [1024])[-1]) self.max_retry = 0 self.local_queue = [] if self.policy in ['default', 'queue', 'drop']: LOG.info(_LI('Publishing policy set to %s') % self.policy) else: LOG.warning(_('Publishing policy is unknown (%s) force to ' 'default') % self.policy) self.policy = 'default' self.retry = 1 if self.policy in ['queue', 'drop'] else None def publish_samples(self, context, samples): """Publish samples on RPC. :param context: Execution context from the service or RPC call. :param samples: Samples from pipeline after transformation. """ meters = [ utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) for sample in samples ] topic = cfg.CONF.publisher_notifier.metering_topic self.local_queue.append((context, topic, meters)) if self.per_meter_topic: for meter_name, meter_list in itertools.groupby( sorted(meters, key=operator.itemgetter('counter_name')), operator.itemgetter('counter_name')): meter_list = list(meter_list) topic_name = topic + '.' 
+ meter_name LOG.debug('Publishing %(m)d samples on %(n)s', {'m': len(meter_list), 'n': topic_name}) self.local_queue.append((context, topic_name, meter_list)) self.flush() def flush(self): # NOTE(sileht): # this is why the self.local_queue is emptied before processing the # queue and the remaining messages in the queue are added to # self.local_queue after in case of another call having already added # something in the self.local_queue queue = self.local_queue self.local_queue = [] self.local_queue = (self._process_queue(queue, self.policy) + self.local_queue) if self.policy == 'queue': self._check_queue_length() def _check_queue_length(self): queue_length = len(self.local_queue) if queue_length > self.max_queue_length > 0: count = queue_length - self.max_queue_length self.local_queue = self.local_queue[count:] LOG.warning(_("Publisher max local_queue length is exceeded, " "dropping %d oldest samples") % count) def _process_queue(self, queue, policy): current_retry = 0 while queue: context, topic, data = queue[0] try: self._send(context, topic, data) except DeliveryFailure: data = sum([len(m) for __, __, m in queue]) if policy == 'queue': LOG.warning(_("Failed to publish %d datapoints, queue " "them"), data) return queue elif policy == 'drop': LOG.warning(_("Failed to publish %d datapoints, " "dropping them"), data) return [] current_retry += 1 if current_retry >= self.max_retry: LOG.exception(_LE("Failed to retry to send sample data " "with max_retry times")) raise else: queue.pop(0) return [] def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ ev_list = [utils.message_from_event( event, cfg.CONF.publisher.telemetry_secret) for event in events] topic = cfg.CONF.publisher_notifier.event_topic self.local_queue.append((context, topic, ev_list)) self.flush() @abc.abstractmethod def _send(self, context, topic, meters): """Send the meters to the messaging topic.""" class NotifierPublisher(MessagingPublisher): def __init__(self, parsed_url, default_topic): super(NotifierPublisher, self).__init__(parsed_url) options = urlparse.parse_qs(parsed_url.query) topic = options.get('topic', [default_topic])[-1] self.notifier = oslo_messaging.Notifier( messaging.get_transport(), driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id='telemetry.publisher.%s' % cfg.CONF.host, topic=topic, retry=self.retry ) def _send(self, context, event_type, data): try: self.notifier.sample(context.to_dict(), event_type=event_type, payload=data) except oslo_messaging.MessageDeliveryFailure as e: raise_delivery_failure(e) class SampleNotifierPublisher(NotifierPublisher): def __init__(self, parsed_url): super(SampleNotifierPublisher, self).__init__( parsed_url, cfg.CONF.publisher_notifier.metering_topic) class EventNotifierPublisher(NotifierPublisher): def __init__(self, parsed_url): super(EventNotifierPublisher, self).__init__( parsed_url, cfg.CONF.publisher_notifier.event_topic) ceilometer-6.1.5/ceilometer/publisher/utils.py0000664000567000056710000001133713072744706022672 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils for publishers """ import hashlib import hmac from oslo_config import cfg import six from ceilometer import utils OPTS = [ cfg.StrOpt('telemetry_secret', secret=True, default='change this for valid signing', help='Secret value for signing messages. Set value empty if ' 'signing is not required to avoid computational overhead.', deprecated_opts=[cfg.DeprecatedOpt("metering_secret", "DEFAULT"), cfg.DeprecatedOpt("metering_secret", "publisher_rpc"), cfg.DeprecatedOpt("metering_secret", "publisher")] ), ] cfg.CONF.register_opts(OPTS, group="publisher") def compute_signature(message, secret): """Return the signature for a message dictionary.""" if not secret: return '' if isinstance(secret, six.text_type): secret = secret.encode('utf-8') digest_maker = hmac.new(secret, b'', hashlib.sha256) for name, value in utils.recursive_keypairs(message): if name == 'message_signature': # Skip any existing signature value, which would not have # been part of the original message. continue digest_maker.update(six.text_type(name).encode('utf-8')) digest_maker.update(six.text_type(value).encode('utf-8')) return digest_maker.hexdigest() def besteffort_compare_digest(first, second): """Returns True if both string inputs are equal, otherwise False. This function should take a constant amount of time regardless of how many characters in the strings match. """ # NOTE(sileht): compare_digest method protected for timing-attacks # exists since python >= 2.7.7 and python >= 3.3 # this a bit less-secure python fallback version # taken from https://github.com/openstack/python-keystoneclient/blob/ # master/keystoneclient/middleware/memcache_crypt.py#L88 if len(first) != len(second): return False result = 0 if six.PY3 and isinstance(first, bytes) and isinstance(second, bytes): for x, y in zip(first, second): result |= x ^ y else: for x, y in zip(first, second): result |= ord(x) ^ ord(y) return result == 0 if hasattr(hmac, 'compare_digest'): compare_digest = hmac.compare_digest else: compare_digest = besteffort_compare_digest def verify_signature(message, secret): """Check the signature in the message. Message is verified against the value computed from the rest of the contents. """ if not secret: return True old_sig = message.get('message_signature', '') new_sig = compute_signature(message, secret) if isinstance(old_sig, six.text_type): try: old_sig = old_sig.encode('ascii') except UnicodeDecodeError: return False if six.PY3: new_sig = new_sig.encode('ascii') return compare_digest(new_sig, old_sig) def meter_message_from_counter(sample, secret): """Make a metering message ready to be published or stored. Returns a dictionary containing a metering message for a notification message and a Sample instance. 
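
    A signing round-trip sketch (``sample`` here stands in for any
    ceilometer Sample instance; the secret value is illustrative)::

        >>> msg = meter_message_from_counter(sample, 'secret')
        >>> verify_signature(msg, 'secret')
        True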
""" msg = {'source': sample.source, 'counter_name': sample.name, 'counter_type': sample.type, 'counter_unit': sample.unit, 'counter_volume': sample.volume, 'user_id': sample.user_id, 'project_id': sample.project_id, 'resource_id': sample.resource_id, 'timestamp': sample.timestamp, 'resource_metadata': sample.resource_metadata, 'message_id': sample.id, } msg['message_signature'] = compute_signature(msg, secret) return msg def message_from_event(event, secret): """Make an event message ready to be published or stored. Returns a serialized model of Event containing an event message """ msg = event.serialize() msg['message_signature'] = compute_signature(msg, secret) return msg ceilometer-6.1.5/ceilometer/publisher/kafka_broker.py0000664000567000056710000000731513072744706024154 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Cisco Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import kafka from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import netutils from six.moves.urllib import parse as urlparse from ceilometer.i18n import _LE from ceilometer.publisher import messaging LOG = log.getLogger(__name__) class KafkaBrokerPublisher(messaging.MessagingPublisher): """Publish metering data to kafka broker. The ip address and port number of kafka broker should be configured in ceilometer pipeline configuration file. If an ip address is not specified, this kafka publisher will not publish any meters. To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: meter: - name: meter_kafka interval: 600 counters: - "*" transformers: sinks: - kafka_sink sinks: - name: kafka_sink transformers: publishers: - kafka://[kafka_broker_ip]:[kafka_broker_port]?topic=[topic] Kafka topic name and broker's port are required for this publisher to work properly. If topic parameter is missing, this kafka publisher publish metering data under a topic name, 'ceilometer'. If the port number is not specified, this Kafka Publisher will use 9092 as the broker's port. This publisher has transmit options such as queue, drop, and retry. These options are specified using policy field of URL parameter. When queue option could be selected, local queue length can be determined using max_queue_length field as well. When the transfer fails with retry option, try to resend the data as many times as specified in max_retry field. If max_retry is not specified, default the number of retry is 100. 
""" def __init__(self, parsed_url): super(KafkaBrokerPublisher, self).__init__(parsed_url) options = urlparse.parse_qs(parsed_url.query) self._producer = None self._host, self._port = netutils.parse_host_port( parsed_url.netloc, default_port=9092) self._topic = options.get('topic', ['ceilometer'])[-1] self.max_retry = int(options.get('max_retry', [100])[-1]) def _ensure_connection(self): if self._producer: return try: client = kafka.KafkaClient("%s:%s" % (self._host, self._port)) self._producer = kafka.SimpleProducer(client) except Exception as e: LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) raise messaging.DeliveryFailure('Kafka Client is not available, ' 'please restart Kafka client') def _send(self, context, event_type, data): self._ensure_connection() # TODO(sileht): don't split the payload into multiple network # message ... but how to do that without breaking consuming # application... try: for d in data: self._producer.send_messages(self._topic, jsonutils.dumps(d)) except Exception as e: messaging.raise_delivery_failure(e) ceilometer-6.1.5/ceilometer/publisher/__init__.py0000664000567000056710000000270113072744706023264 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_utils import netutils import six from stevedore import driver def get_publisher(url, namespace='ceilometer.publisher'): """Get publisher driver and load it. :param URL: URL for the publisher :param namespace: Namespace to use to look for drivers. """ parse_result = netutils.urlsplit(url) loaded_driver = driver.DriverManager(namespace, parse_result.scheme) return loaded_driver.driver(parse_result) @six.add_metaclass(abc.ABCMeta) class PublisherBase(object): """Base class for plugins that publish data.""" def __init__(self, parsed_url): pass @abc.abstractmethod def publish_samples(self, context, samples): """Publish samples into final conduit.""" @abc.abstractmethod def publish_events(self, context, events): """Publish events into final conduit.""" ceilometer-6.1.5/ceilometer/exchange_control.py0000664000567000056710000000414513072744706023056 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg EXCHANGE_OPTS = [ cfg.StrOpt('heat_control_exchange', default='heat', help="Exchange name for Heat notifications"), cfg.StrOpt('glance_control_exchange', default='glance', help="Exchange name for Glance notifications."), cfg.StrOpt('magnetodb_control_exchange', default='magnetodb', help="Exchange name for Magnetodb notifications."), cfg.StrOpt('keystone_control_exchange', default='keystone', help="Exchange name for Keystone notifications."), cfg.StrOpt('cinder_control_exchange', default='cinder', help="Exchange name for Cinder notifications."), cfg.StrOpt('sahara_control_exchange', default='sahara', help="Exchange name for Data Processing notifications."), cfg.StrOpt('swift_control_exchange', default='swift', help="Exchange name for Swift notifications."), cfg.StrOpt('magnum_control_exchange', default='magnum', help="Exchange name for Magnum notifications."), cfg.StrOpt('trove_control_exchange', default='trove', help="Exchange name for DBaaS notifications."), cfg.StrOpt('zaqar_control_exchange', default='zaqar', help="Exchange name for Messaging service notifications."), cfg.StrOpt('dns_control_exchange', default='central', help="Exchange name for DNS service notifications."), ] ceilometer-6.1.5/ceilometer/pipeline.py0000664000567000056710000007375613072744706021357 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # Copyright 2014 Red Hat, Inc # # Authors: Yunhong Jiang # Eoghan Glynn # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import hashlib from itertools import chain import os from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import timeutils import six from stevedore import extension import yaml from ceilometer.event.storage import models from ceilometer.i18n import _, _LI, _LW from ceilometer import publisher from ceilometer.publisher import utils as publisher_utils from ceilometer import sample as sample_util from ceilometer import utils OPTS = [ cfg.StrOpt('pipeline_cfg_file', default="pipeline.yaml", help="Configuration file for pipeline definition." ), cfg.StrOpt('event_pipeline_cfg_file', default="event_pipeline.yaml", help="Configuration file for event pipeline definition." ), cfg.BoolOpt('refresh_pipeline_cfg', default=False, help="Refresh Pipeline configuration on-the-fly." ), cfg.BoolOpt('refresh_event_pipeline_cfg', default=False, help="Refresh Event Pipeline configuration on-the-fly." ), cfg.IntOpt('pipeline_polling_interval', default=20, help="Polling interval for pipeline file configuration" " in seconds." 
), ] cfg.CONF.register_opts(OPTS) LOG = log.getLogger(__name__) class PipelineException(Exception): def __init__(self, message, pipeline_cfg): self.msg = message self.pipeline_cfg = pipeline_cfg def __str__(self): return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg) @six.add_metaclass(abc.ABCMeta) class PipelineEndpoint(object): def __init__(self, context, pipeline): self.filter_rule = oslo_messaging.NotificationFilter( publisher_id=pipeline.name) self.publish_context = PublishContext(context, [pipeline]) @abc.abstractmethod def sample(self, messages): pass class SamplePipelineEndpoint(PipelineEndpoint): def sample(self, messages): samples = chain.from_iterable(m["payload"] for m in messages) samples = [ sample_util.Sample(name=s['counter_name'], type=s['counter_type'], unit=s['counter_unit'], volume=s['counter_volume'], user_id=s['user_id'], project_id=s['project_id'], resource_id=s['resource_id'], timestamp=s['timestamp'], resource_metadata=s['resource_metadata'], source=s.get('source')) for s in samples if publisher_utils.verify_signature( s, cfg.CONF.publisher.telemetry_secret) ] with self.publish_context as p: p(samples) class EventPipelineEndpoint(PipelineEndpoint): def sample(self, messages): events = chain.from_iterable(m["payload"] for m in messages) events = [ models.Event( message_id=ev['message_id'], event_type=ev['event_type'], generated=timeutils.normalize_time( timeutils.parse_isotime(ev['generated'])), traits=[models.Trait(name, dtype, models.Trait.convert_value(dtype, value)) for name, dtype, value in ev['traits']], raw=ev.get('raw', {})) for ev in events if publisher_utils.verify_signature( ev, cfg.CONF.publisher.telemetry_secret) ] try: with self.publish_context as p: p(events) except Exception: if not cfg.CONF.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE raise return oslo_messaging.NotificationResult.HANDLED class _PipelineTransportManager(object): def __init__(self): self.transporters = [] @staticmethod def hash_grouping(datapoint, grouping_keys): value = '' for key in grouping_keys or []: value += datapoint.get(key) if datapoint.get(key) else '' return hash(value) def add_transporter(self, transporter): self.transporters.append(transporter) def publisher(self, context): serializer = self.serializer hash_grouping = self.hash_grouping transporters = self.transporters filter_attr = self.filter_attr event_type = self.event_type class PipelinePublishContext(object): def __enter__(self): def p(data): # TODO(gordc): cleanup so payload is always single # datapoint. we can't correctly bucketise # datapoints if batched. 
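                    # NOTE: each serialized datapoint is routed to exactly
                    # one notifier per matching transporter: hashing the
                    # configured grouping keys (for example, resource_id)
                    # modulo len(notifiers) keeps datapoints for the same
                    # resource on the same notifier.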
data = [data] if not isinstance(data, list) else data for datapoint in data: serialized_data = serializer(datapoint) for d_filter, grouping_keys, notifiers in transporters: if d_filter(serialized_data[filter_attr]): key = (hash_grouping(serialized_data, grouping_keys) % len(notifiers)) notifier = notifiers[key] notifier.sample(context.to_dict(), event_type=event_type, payload=[serialized_data]) return p def __exit__(self, exc_type, exc_value, traceback): pass return PipelinePublishContext() class SamplePipelineTransportManager(_PipelineTransportManager): filter_attr = 'counter_name' event_type = 'ceilometer.pipeline' @staticmethod def serializer(data): return publisher_utils.meter_message_from_counter( data, cfg.CONF.publisher.telemetry_secret) class EventPipelineTransportManager(_PipelineTransportManager): filter_attr = 'event_type' event_type = 'pipeline.event' @staticmethod def serializer(data): return publisher_utils.message_from_event( data, cfg.CONF.publisher.telemetry_secret) class PublishContext(object): def __init__(self, context, pipelines=None): pipelines = pipelines or [] self.pipelines = set(pipelines) self.context = context def add_pipelines(self, pipelines): self.pipelines.update(pipelines) def __enter__(self): def p(data): for p in self.pipelines: p.publish_data(self.context, data) return p def __exit__(self, exc_type, exc_value, traceback): for p in self.pipelines: p.flush(self.context) class Source(object): """Represents a source of samples or events.""" def __init__(self, cfg): self.cfg = cfg try: self.name = cfg['name'] self.sinks = cfg.get('sinks') except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) def __str__(self): return self.name def check_sinks(self, sinks): if not self.sinks: raise PipelineException( "No sink defined in source %s" % self, self.cfg) for sink in self.sinks: if sink not in sinks: raise PipelineException( "Dangling sink %s from source %s" % (sink, self), self.cfg) def check_source_filtering(self, data, d_type): """Source data rules checking - At least one meaningful datapoint exist - Included type and excluded type can't co-exist on the same pipeline - Included type meter and wildcard can't co-exist at same pipeline """ if not data: raise PipelineException('No %s specified' % d_type, self.cfg) if ([x for x in data if x[0] not in '!*'] and [x for x in data if x[0] == '!']): raise PipelineException( 'Both included and excluded %s specified' % d_type, cfg) if '*' in data and [x for x in data if x[0] not in '!*']: raise PipelineException( 'Included %s specified with wildcard' % d_type, self.cfg) @staticmethod def is_supported(dataset, data_name): # Support wildcard like storage.* and !disk.* # Start with negation, we consider that the order is deny, allow if any(utils.match(data_name, datapoint[1:]) for datapoint in dataset if datapoint[0] == '!'): return False if any(utils.match(data_name, datapoint) for datapoint in dataset if datapoint[0] != '!'): return True # if we only have negation, we suppose the default is allow return all(datapoint.startswith('!') for datapoint in dataset) class EventSource(Source): """Represents a source of events. In effect it is a set of notification handlers capturing events for a set of matching notifications. 
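    For example, a source capturing only compute events could be defined
    as follows (names are illustrative)::

        sources:
          - name: event_source
            events:
              - "compute.instance.*"
            sinks:
              - event_sink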
""" def __init__(self, cfg): super(EventSource, self).__init__(cfg) self.events = cfg.get('events') self.check_source_filtering(self.events, 'events') def support_event(self, event_name): return self.is_supported(self.events, event_name) class SampleSource(Source): """Represents a source of samples. In effect it is a set of pollsters and/or notification handlers emitting samples for a set of matching meters. Each source encapsulates meter name matching, polling interval determination, optional resource enumeration or discovery, and mapping to one or more sinks for publication. """ def __init__(self, cfg): super(SampleSource, self).__init__(cfg) # Support 'counters' for backward compatibility self.meters = cfg.get('meters', cfg.get('counters')) try: self.interval = int(cfg.get('interval', 600)) except ValueError: raise PipelineException("Invalid interval value", cfg) if self.interval <= 0: raise PipelineException("Interval value should > 0", cfg) self.resources = cfg.get('resources') or [] if not isinstance(self.resources, list): raise PipelineException("Resources should be a list", cfg) self.discovery = cfg.get('discovery') or [] if not isinstance(self.discovery, list): raise PipelineException("Discovery should be a list", cfg) self.check_source_filtering(self.meters, 'meters') def get_interval(self): return self.interval def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) class Sink(object): """Represents a sink for the transformation and publication of data. Each sink config is concerned *only* with the transformation rules and publication conduits for data. In effect, a sink describes a chain of handlers. The chain starts with zero or more transformers and ends with one or more publishers. The first transformer in the chain is passed data from the corresponding source, takes some action such as deriving rate of change, performing unit conversion, or aggregating, before passing the modified data to next step. The subsequent transformers, if any, handle the data similarly. At the end of the chain, publishers publish the data. The exact publishing method depends on publisher type, for example, pushing into data storage via the message bus providing guaranteed delivery, or for loss-tolerant data UDP may be used. If no transformers are included in the chain, the publishers are passed data directly from the sink which are published unchanged. 
""" def __init__(self, cfg, transformer_manager): self.cfg = cfg try: self.name = cfg['name'] # It's legal to have no transformer specified self.transformer_cfg = cfg.get('transformers') or [] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) if not cfg.get('publishers'): raise PipelineException("No publisher specified", cfg) self.publishers = [] for p in cfg['publishers']: if '://' not in p: # Support old format without URL p = p + "://" try: self.publishers.append(publisher.get_publisher(p, self.NAMESPACE)) except Exception: LOG.exception(_("Unable to load publisher %s"), p) self.multi_publish = True if len(self.publishers) > 1 else False self.transformers = self._setup_transformers(cfg, transformer_manager) def __str__(self): return self.name def _setup_transformers(self, cfg, transformer_manager): transformers = [] for transformer in self.transformer_cfg: parameter = transformer['parameters'] or {} try: ext = transformer_manager[transformer['name']] except KeyError: raise PipelineException( "No transformer named %s loaded" % transformer['name'], cfg) transformers.append(ext.plugin(**parameter)) LOG.info(_LI( "Pipeline %(pipeline)s: Setup transformer instance %(name)s " "with parameter %(param)s") % ({'pipeline': self, 'name': transformer['name'], 'param': parameter})) return transformers class EventSink(Sink): NAMESPACE = 'ceilometer.event.publisher' def publish_events(self, ctxt, events): if events: for p in self.publishers: try: p.publish_events(ctxt, events) except Exception: LOG.exception(_("Pipeline %(pipeline)s: %(status)s" " after error from publisher %(pub)s") % ({'pipeline': self, 'status': 'Continue' if self.multi_publish else 'Exit', 'pub': p} )) if not self.multi_publish: raise def flush(self, ctxt): """Flush data after all events have been injected to pipeline.""" pass class SampleSink(Sink): NAMESPACE = 'ceilometer.publisher' def _transform_sample(self, start, ctxt, sample): try: for transformer in self.transformers[start:]: sample = transformer.handle_sample(ctxt, sample) if not sample: LOG.debug( "Pipeline %(pipeline)s: Sample dropped by " "transformer %(trans)s", {'pipeline': self, 'trans': transformer}) return return sample except Exception as err: # TODO(gordc): only use one log level. LOG.warning(_("Pipeline %(pipeline)s: " "Exit after error from transformer " "%(trans)s for %(smp)s") % ({'pipeline': self, 'trans': transformer, 'smp': sample})) LOG.exception(err) def _publish_samples(self, start, ctxt, samples): """Push samples into pipeline for publishing. :param start: The first transformer that the sample will be injected. This is mainly for flush() invocation that transformer may emit samples. :param ctxt: Execution context from the manager or service. :param samples: Sample list. 
""" transformed_samples = [] if not self.transformers: transformed_samples = samples else: for sample in samples: LOG.debug( "Pipeline %(pipeline)s: Transform sample " "%(smp)s from %(trans)s transformer", {'pipeline': self, 'smp': sample, 'trans': start}) sample = self._transform_sample(start, ctxt, sample) if sample: transformed_samples.append(sample) if transformed_samples: for p in self.publishers: try: p.publish_samples(ctxt, transformed_samples) except Exception: LOG.exception(_( "Pipeline %(pipeline)s: Continue after error " "from publisher %(pub)s") % ({'pipeline': self, 'pub': p})) def publish_samples(self, ctxt, samples): self._publish_samples(0, ctxt, samples) def flush(self, ctxt): """Flush data after all samples have been injected to pipeline.""" for (i, transformer) in enumerate(self.transformers): try: self._publish_samples(i + 1, ctxt, list(transformer.flush(ctxt))) except Exception as err: LOG.warning(_( "Pipeline %(pipeline)s: Error flushing " "transformer %(trans)s") % ({'pipeline': self, 'trans': transformer})) LOG.exception(err) @six.add_metaclass(abc.ABCMeta) class Pipeline(object): """Represents a coupling between a sink and a corresponding source.""" def __init__(self, source, sink): self.source = source self.sink = sink self.name = str(self) def __str__(self): return (self.source.name if self.source.name == self.sink.name else '%s:%s' % (self.source.name, self.sink.name)) def flush(self, ctxt): self.sink.flush(ctxt) @property def publishers(self): return self.sink.publishers @abc.abstractmethod def publish_data(self, ctxt, data): """Publish data from pipeline.""" class EventPipeline(Pipeline): """Represents a pipeline for Events.""" def __str__(self): # NOTE(gordc): prepend a namespace so we ensure event and sample # pipelines do not have the same name. 
return 'event:%s' % super(EventPipeline, self).__str__() def support_event(self, event_type): return self.source.support_event(event_type) def publish_data(self, ctxt, events): if not isinstance(events, list): events = [events] supported = [e for e in events if self.source.support_event(e.event_type)] self.sink.publish_events(ctxt, supported) class SamplePipeline(Pipeline): """Represents a pipeline for Samples.""" def get_interval(self): return self.source.interval @property def resources(self): return self.source.resources @property def discovery(self): return self.source.discovery def support_meter(self, meter_name): return self.source.support_meter(meter_name) def _validate_volume(self, s): volume = s.volume if volume is None: LOG.warning(_LW( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: None), the sample will' ' be dropped') % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} ) return False if not isinstance(volume, (int, float)): try: volume = float(volume) except ValueError: LOG.warning(_LW( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped') % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': ( s.timestamp if s.timestamp else 'NO TIMESTAMP'), 'counter_volume': volume} ) return False return True def publish_data(self, ctxt, samples): if not isinstance(samples, list): samples = [samples] supported = [s for s in samples if self.source.support_meter(s.name) and self._validate_volume(s)] self.sink.publish_samples(ctxt, supported) SAMPLE_TYPE = {'pipeline': SamplePipeline, 'source': SampleSource, 'sink': SampleSink} EVENT_TYPE = {'pipeline': EventPipeline, 'source': EventSource, 'sink': EventSink} class PipelineManager(object): """Pipeline Manager Pipeline manager sets up pipelines according to config file Usually only one pipeline manager exists in the system. """ def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE): """Setup the pipelines according to config. The configuration is supported as follows: Decoupled: the source and sink configuration are separately specified before being linked together. This allows source- specific configuration, such as resource discovery, to be kept focused only on the fine-grained source while avoiding the necessity for wide duplication of sink-related config. The configuration is provided in the form of separate lists of dictionaries defining sources and sinks, for example: {"sources": [{"name": source_1, "interval": interval_time, "meters" : ["meter_1", "meter_2"], "resources": ["resource_uri1", "resource_uri2"], "sinks" : ["sink_1", "sink_2"] }, {"name": source_2, "interval": interval_time, "meters" : ["meter_3"], "sinks" : ["sink_2"] }, ], "sinks": [{"name": sink_1, "transformers": [ {"name": "Transformer_1", "parameters": {"p1": "value"}}, {"name": "Transformer_2", "parameters": {"p1": "value"}}, ], "publishers": ["publisher_1", "publisher_2"] }, {"name": sink_2, "publishers": ["publisher_3"] }, ] } The interval determines the cadence of sample injection into the pipeline where samples are produced under the direct control of an agent, i.e. via a polling cycle as opposed to incoming notifications. Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. 
The 'meter_name" is Sample name field. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. The resources is list of URI indicating the resources from where the meters should be polled. It's optional and it's up to the specific pollster to decide how to use it. Transformer's name is plugin name in setup.cfg. Publisher's name is plugin name in setup.cfg """ self.pipelines = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) LOG.info(_LI('detected decoupled pipeline config format')) unique_names = set() sources = [] for s in cfg.get('sources', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) sources.append(p_type['source'](s)) unique_names.clear() sinks = {} for s in cfg.get('sinks', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated sink names: %s" % name, self) else: unique_names.add(name) sinks[s['name']] = p_type['sink'](s, transformer_manager) unique_names.clear() for source in sources: source.check_sinks(sinks) for target in source.sinks: pipe = p_type['pipeline'](source, sinks[target]) if pipe.name in unique_names: raise PipelineException( "Duplicate pipeline name: %s. Ensure pipeline" " names are unique. (name is the source and sink" " names combined)" % pipe.name, cfg) else: unique_names.add(pipe.name) self.pipelines.append(pipe) unique_names.clear() def publisher(self, context): """Build a new Publisher for these manager pipelines. :param context: The context. """ return PublishContext(context, self.pipelines) class PollingManager(object): """Polling Manager Polling manager sets up polling according to config file. """ def __init__(self, cfg): """Setup the polling according to config. The configuration is the sources half of the Pipeline Config. 
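        For example, only the "sources" half of the pipeline file is
        consulted here::

            sources:
              - name: meter_source
                interval: 600
                meters:
                  - "*"
                sinks:
                  - meter_sink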
""" self.sources = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) LOG.info(_LI('detected decoupled pipeline config format')) unique_names = set() for s in cfg.get('sources', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) self.sources.append(SampleSource(s)) unique_names.clear() def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE): if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) LOG.debug("Pipeline config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) return PipelineManager(pipeline_cfg, transformer_manager or extension.ExtensionManager( 'ceilometer.transformer', ), p_type) def _setup_polling_manager(cfg_file): if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) LOG.debug("Polling config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) return PollingManager(pipeline_cfg) def setup_event_pipeline(transformer_manager=None): """Setup event pipeline manager according to yaml config file.""" cfg_file = cfg.CONF.event_pipeline_cfg_file return _setup_pipeline_manager(cfg_file, transformer_manager, EVENT_TYPE) def setup_pipeline(transformer_manager=None): """Setup pipeline manager according to yaml config file.""" cfg_file = cfg.CONF.pipeline_cfg_file return _setup_pipeline_manager(cfg_file, transformer_manager) def _get_pipeline_cfg_file(p_type=SAMPLE_TYPE): if p_type == EVENT_TYPE: cfg_file = cfg.CONF.event_pipeline_cfg_file else: cfg_file = cfg.CONF.pipeline_cfg_file if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) return cfg_file def get_pipeline_mtime(p_type=SAMPLE_TYPE): cfg_file = _get_pipeline_cfg_file(p_type) return os.path.getmtime(cfg_file) def get_pipeline_hash(p_type=SAMPLE_TYPE): cfg_file = _get_pipeline_cfg_file(p_type) with open(cfg_file) as fap: data = fap.read() if six.PY3: data = data.encode('utf-8') file_hash = hashlib.md5(data).hexdigest() return file_hash def setup_polling(): """Setup polling manager according to yaml config file.""" cfg_file = cfg.CONF.pipeline_cfg_file return _setup_polling_manager(cfg_file) def get_pipeline_grouping_key(pipe): keys = [] for transformer in pipe.sink.transformers: keys += transformer.grouping_keys return list(set(keys)) ceilometer-6.1.5/ceilometer/objectstore/0000775000567000056710000000000013072745164021500 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/objectstore/rgw.py0000664000567000056710000001750113072744706022656 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common code for working with ceph object stores """ from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six.moves.urllib.parse as urlparse from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('radosgw', default='object-store', help='Radosgw service type.'), ] CREDENTIAL_OPTS = [ cfg.StrOpt('access_key', secret=True, help='Access key for Radosgw Admin.'), cfg.StrOpt('secret_key', secret=True, help='Secret key for Radosgw Admin.') ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.register_opts(CREDENTIAL_OPTS, group='rgw_admin_credentials') cfg.CONF.import_group('rgw_admin_credentials', 'ceilometer.service') class _Base(plugin_base.PollsterBase): METHOD = 'bucket' _ENDPOINT = None def __init__(self): self.access_key = cfg.CONF.rgw_admin_credentials.access_key self.secret = cfg.CONF.rgw_admin_credentials.secret_key @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'rgw.get_%s' % self.METHOD @staticmethod def _get_endpoint(ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once, also we assume that in a single deployment # we may be only deploying `radosgw` or `swift` as the object-store if _Base._ENDPOINT is None: try: conf = cfg.CONF.service_credentials rgw_url = keystone_client.get_service_catalog( ksclient).url_for( service_type=cfg.CONF.service_types.radosgw, interface=conf.interface) _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') except exceptions.EndpointNotFound: LOG.debug("Radosgw endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(ksclient) if not endpoint: raise StopIteration() try: from ceilometer.objectstore.rgw_client import RGWAdminClient rgw_client = RGWAdminClient(endpoint, self.access_key, self.secret) except ImportError: raise plugin_base.PollsterPermanentError(tenants) for t in tenants: api_method = 'get_%s' % self.METHOD yield t.id, getattr(rgw_client, api_method)(t.id) class ContainersObjectsPollster(_Base): """Get info about object counts in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects', type=sample.TYPE_GAUGE, volume=int(it.num_objects), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about object sizes in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(it.size * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all 
accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.size', type=sample.TYPE_GAUGE, volume=int(bucket_info['size'] * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_objects']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsContainersPollster(_Base): def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.containers', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_buckets']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class UsagePollster(_Base): METHOD = 'usage' def get_samples(self, manager, cache, resources): for tenant, usage in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.api.request', type=sample.TYPE_GAUGE, volume=int(usage), unit='request', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) ceilometer-6.1.5/ceilometer/objectstore/swift.py0000664000567000056710000001647013072744706023217 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with object stores """ from __future__ import absolute_import from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six.moves.urllib.parse as urlparse from swiftclient import client as swift from ceilometer.agent import plugin_base from ceilometer.i18n import _LI from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('reseller_prefix', default='AUTH_', help="Swift reseller prefix. 
Must be on par with " "reseller_prefix in proxy-server.conf."), ] SERVICE_OPTS = [ cfg.StrOpt('swift', default='object-store', help='Swift service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class _Base(plugin_base.PollsterBase): METHOD = 'head' _ENDPOINT = None @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'swift.%s_account' % self.METHOD @staticmethod def _get_endpoint(ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once if _Base._ENDPOINT is None: try: conf = cfg.CONF.service_credentials _Base._ENDPOINT = keystone_client.get_service_catalog( ksclient).url_for( service_type=cfg.CONF.service_types.swift, interface=conf.interface, region_name=conf.region_name) except exceptions.EndpointNotFound as e: LOG.info(_LI("Swift endpoint not found: %s"), e) return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(ksclient) if not endpoint: raise StopIteration() for t in tenants: api_method = '%s_account' % self.METHOD yield (t.id, getattr(swift, api_method) (self._neaten_url(endpoint, t.id), keystone_client.get_auth_token(ksclient))) @staticmethod def _neaten_url(endpoint, tenant_id): """Transform the registered url to standard and valid format.""" return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', 'v1/' + cfg.CONF.reseller_prefix + tenant_id) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects', type=sample.TYPE_GAUGE, volume=int(account['x-account-object-count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.size', type=sample.TYPE_GAUGE, volume=int(account['x-account-bytes-used']), unit='B', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsContainersPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.containers', type=sample.TYPE_GAUGE, volume=int(account['x-account-container-count']), unit='container', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersObjectsPollster(_Base): """Get info about containers using Swift API.""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield 
sample.Sample( name='storage.containers.objects', type=sample.TYPE_GAUGE, volume=int(container['count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about containers using Swift API.""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(container['bytes']), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) ceilometer-6.1.5/ceilometer/objectstore/rgw_client.py0000664000567000056710000000474313072744706024220 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple from awsauth import S3Auth import requests import six.moves.urllib.parse as urlparse from ceilometer.i18n import _ class RGWAdminAPIFailed(Exception): pass class RGWAdminClient(object): Bucket = namedtuple('Bucket', 'name, num_objects, size') def __init__(self, endpoint, access_key, secret_key): self.access_key = access_key self.secret = secret_key self.endpoint = endpoint self.hostname = urlparse.urlparse(endpoint).netloc def _make_request(self, path, req_params): uri = "{0}/{1}".format(self.endpoint, path) r = requests.get(uri, params=req_params, auth=S3Auth(self.access_key, self.secret, self.hostname) ) if r.status_code != 200: raise RGWAdminAPIFailed( _('RGW AdminOps API returned %(status)s %(reason)s') % {'status': r.status_code, 'reason': r.reason}) return r.json() def get_bucket(self, tenant_id): path = "bucket" req_params = {"uid": tenant_id, "stats": "true"} json_data = self._make_request(path, req_params) stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} stats['num_buckets'] = len(json_data) for it in json_data: for k, v in it["usage"].items(): stats['num_objects'] += v["num_objects"] stats['size'] += v["size_kb"] stats['buckets'].append(self.Bucket(it["bucket"], v["num_objects"], v["size_kb"])) return stats def get_usage(self, tenant_id): path = "usage" req_params = {"uid": tenant_id} json_data = self._make_request(path, req_params) usage_data = json_data["summary"] return sum((it["total"]["ops"] for it in usage_data)) ceilometer-6.1.5/ceilometer/objectstore/__init__.py0000664000567000056710000000000013072744703023575 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/0000775000567000056710000000000013072745164017726 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/hooks.py0000664000567000056710000000617713072744705021436 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import oslo_messaging from pecan import hooks from ceilometer.i18n import _LE from ceilometer import messaging from ceilometer import storage LOG = log.getLogger(__name__) cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') class ConfigHook(hooks.PecanHook): """Attach the configuration object to the request. That allows controllers to get it. """ @staticmethod def before(state): state.request.cfg = cfg.CONF class DBHook(hooks.PecanHook): def __init__(self): self.storage_connection = DBHook.get_connection('metering') self.event_storage_connection = DBHook.get_connection('event') if (not self.storage_connection and not self.event_storage_connection): raise Exception("Api failed to start. Failed to connect to " "databases, purpose: %s" % ', '.join(['metering', 'event'])) def before(self, state): state.request.storage_conn = self.storage_connection state.request.event_storage_conn = self.event_storage_connection @staticmethod def get_connection(purpose): try: return storage.get_connection_from_config(cfg.CONF, purpose) except Exception as err: params = {"purpose": purpose, "err": err} LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " "retry later: %(err)s") % params) class NotifierHook(hooks.PecanHook): """Create and attach a notifier to the request. Usually, samples will be push to notification bus by notifier when they are posted via /v2/meters/ API. """ def __init__(self): transport = messaging.get_transport() self.notifier = oslo_messaging.Notifier( transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id="ceilometer.api") def before(self, state): state.request.notifier = self.notifier class TranslationHook(hooks.PecanHook): def after(self, state): # After a request has been done, we need to see if # ClientSideError has added an error onto the response. # If it has we need to get it info the thread-safe WSGI # environ to be used by the ParsableErrorMiddleware. if hasattr(state.response, 'translatable_error'): state.request.environ['translatable_error'] = ( state.response.translatable_error) ceilometer-6.1.5/ceilometer/api/rbac.py0000664000567000056710000000637313072744705021220 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2014 Hewlett-Packard Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
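# NOTE: a minimal sketch of driving the RGWAdminClient defined in
# rgw_client.py above (endpoint and credentials are illustrative):
#
#     from ceilometer.objectstore.rgw_client import RGWAdminClient
#
#     client = RGWAdminClient('http://rgw.example.com:8000/admin',
#                             access_key, secret_key)
#     stats = client.get_bucket(tenant_id)   # num_buckets, size, buckets
#     total_ops = client.get_usage(tenant_id)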
"""Access Control Lists (ACL's) control access the API server.""" from oslo_config import cfg from oslo_policy import policy import pecan _ENFORCER = None CONF = cfg.CONF def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def _has_rule(name): return name in _ENFORCER.rules.keys() def enforce(policy_name, request): """Return the user and project the request should be limited to. :param request: HTTP request :param policy_name: the policy name to validate authz against. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) _ENFORCER.load_rules() rule_method = "telemetry:" + policy_name headers = request.headers policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by allowing the action if # there is no rule defined for it if ((_has_rule('default') or _has_rule(rule_method)) and not _ENFORCER.enforce(rule_method, {}, policy_dict)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') # TODO(fabiog): these methods are still used because the scoping part is really # convoluted and difficult to separate out. def get_limited_to(headers): """Return the user and project the request should be limited to. :param headers: HTTP headers dictionary :return: A tuple of (user, project), set to None if there's no limit on one of these. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) _ENFORCER.load_rules() policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by using context_is_admin # rule if the segregation rule (added in Kilo) is not defined rule_name = 'segregation' if _has_rule( 'segregation') else 'context_is_admin' if not _ENFORCER.enforce(rule_name, {}, policy_dict): return headers.get('X-User-Id'), headers.get('X-Project-Id') return None, None def get_limited_to_project(headers): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :return: A project, or None if there's no limit on it. """ return get_limited_to(headers)[1] ceilometer-6.1.5/ceilometer/api/app.wsgi0000664000567000056710000000164413072744705021406 0ustar jenkinsjenkins00000000000000# -*- mode: python -*- # # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Use this file for deploying the API under mod_wsgi. See http://pecan.readthedocs.org/en/latest/deployment.html for details. 
""" from ceilometer import service from ceilometer.api import app # Initialize the oslo configuration library and logging service.prepare_service([]) application = app.load_app() ceilometer-6.1.5/ceilometer/api/middleware.py0000664000567000056710000001231613072744705022420 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ import json from lxml import etree from oslo_log import log import six import webob from ceilometer import i18n from ceilometer.i18n import _ LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" @staticmethod def best_match_language(accept_language): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not accept_language: return None all_languages = i18n.get_available_languages() return accept_language.best_match(all_languages) def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. 
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): content_type = 'application/xml' try: # simple check xml is valid fault = etree.fromstring(b'\n'.join(app_iter)) # Add the translated error to the xml data if error is not None: for fault_string in fault.findall('faultstring'): fault_string.text = i18n.translate(error, user_locale) error_message = etree.tostring(fault) body = b''.join((b'', error_message, b'')) except etree.XMLSyntaxError as err: LOG.error(_('Error parsing HTTP response: %s'), err) error_message = state['status_code'] body = '%s' % error_message if six.PY3: body = body.encode('utf-8') else: content_type = 'application/json' app_data = b'\n'.join(app_iter) if six.PY3: app_data = app_data.decode('utf-8') try: fault = json.loads(app_data) if error is not None and 'faultstring' in fault: fault['faultstring'] = i18n.translate(error, user_locale) except ValueError as err: fault = app_data body = json.dumps({'error_message': fault}) if six.PY3: body = body.encode('utf-8') state['headers'].append(('Content-Length', str(len(body)))) state['headers'].append(('Content-Type', content_type)) body = [body] else: body = app_iter return body ceilometer-6.1.5/ceilometer/api/app.py0000664000567000056710000000726413072744705021071 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from werkzeug import serving from ceilometer.api import hooks from ceilometer.api import middleware from ceilometer.i18n import _LI, _LW LOG = log.getLogger(__name__) CONF = cfg.CONF OPTS = [ cfg.StrOpt('api_paste_config', default="api_paste.ini", help="Configuration file for WSGI definition of API." ), ] API_OPTS = [ cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), cfg.IntOpt('default_api_return_limit', min=1, default=100, help='Default maximum number of items returned by API request.' 
), ] CONF.register_opts(OPTS) CONF.register_opts(API_OPTS, group='api') def setup_app(pecan_config=None): # FIXME: Replace DBHook with a hooks.TransactionHook app_hooks = [hooks.ConfigHook(), hooks.DBHook(), hooks.NotifierHook(), hooks.TranslationHook()] pecan_config = pecan_config or { "app": { 'root': 'ceilometer.api.controllers.root.RootController', 'modules': ['ceilometer.api'], } } pecan.configuration.set_config(dict(pecan_config), overwrite=True) # NOTE(sileht): pecan debug won't work in multi-process environment pecan_debug = CONF.api.pecan_debug if CONF.api.workers and CONF.api.workers != 1 and pecan_debug: pecan_debug = False LOG.warning(_LW('pecan_debug cannot be enabled when workers is > 1; ' 'the value is overridden with False')) app = pecan.make_app( pecan_config['app']['root'], debug=pecan_debug, hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False ) return app def load_app(): # Build the WSGI app cfg_file = None cfg_path = cfg.CONF.api_paste_config if not os.path.isabs(cfg_path): cfg_file = CONF.find_file(cfg_path) elif os.path.exists(cfg_path): cfg_file = cfg_path if not cfg_file: raise cfg.ConfigFilesNotFoundError([cfg.CONF.api_paste_config]) LOG.info("Full WSGI config used: %s" % cfg_file) return deploy.loadapp("config:" + cfg_file) def build_server(): app = load_app() # Create the WSGI server and start it host, port = cfg.CONF.api.host, cfg.CONF.api.port LOG.info(_LI('Starting server in PID %s') % os.getpid()) LOG.info(_LI("Configuration:")) cfg.CONF.log_opt_values(LOG, logging.INFO) if host == '0.0.0.0': LOG.info(_LI( 'serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s') % ({'sport': port, 'vport': port})) else: LOG.info(_LI("serving on http://%(host)s:%(port)s") % ( {'host': host, 'port': port})) serving.run_simple(cfg.CONF.api.host, cfg.CONF.api.port, app, processes=CONF.api.workers) def app_factory(global_config, **local_conf): return setup_app() ceilometer-6.1.5/ceilometer/api/controllers/0000775000567000056710000000000013072745164022274 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/controllers/root.py0000664000567000056710000000355113072744703023633 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
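# The RootController defined below answers GET / with a version
# discovery document. An abridged, hand-written illustration of the
# response for a deployment listening on localhost:8777 (field values
# follow version_descriptor() below; the host is an assumption):
#
#     {"versions": {"values": [{
#         "id": "v2",
#         "status": "stable",
#         "updated": "2013-02-13T00:00:00Z",
#         "links": [{"href": "http://localhost:8777/v2", "rel": "self"},
#                   {"href": "http://docs.openstack.org/",
#                    "rel": "describedby", "type": "text/html"}],
#         "media-types": [
#             {"base": "application/json",
#              "type": "application/vnd.openstack.telemetry-v2+json"},
#             {"base": "application/xml",
#              "type": "application/vnd.openstack.telemetry-v2+xml"}]}]}}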
import pecan from ceilometer.api.controllers.v2 import root as v2 MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' class RootController(object): def __init__(self): self.v2 = v2.V2Controller() @pecan.expose('json') def index(self): base_url = pecan.request.application_url available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] collected = [version_descriptor(base_url, v['tag'], v['date']) for v in available] versions = {'versions': {'values': collected}} return versions def version_descriptor(base_url, version, released_on): url = version_url(base_url, version) return { 'id': version, 'links': [ {'href': url, 'rel': 'self', }, {'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', }], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], 'status': 'stable', 'updated': released_on, } def version_url(base_url, version_number): return '%s/%s' % (base_url, version_number) ceilometer-6.1.5/ceilometer/api/controllers/v2/0000775000567000056710000000000013072745164022623 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/controllers/v2/root.py0000664000567000056710000001671113072744705024166 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import strutils import pecan from ceilometer.api.controllers.v2 import capabilities from ceilometer.api.controllers.v2 import events from ceilometer.api.controllers.v2 import meters from ceilometer.api.controllers.v2 import query from ceilometer.api.controllers.v2 import resources from ceilometer.api.controllers.v2 import samples from ceilometer.i18n import _, _LW from ceilometer import keystone_client API_OPTS = [ cfg.BoolOpt('gnocchi_is_enabled', default=None, help=('Set True to disable resource/meter/sample URLs. ' 'Default autodetection by querying keystone.')), cfg.BoolOpt('aodh_is_enabled', default=None, help=('Set True to redirect alarms URLs to aodh. ' 'Default autodetection by querying keystone.')), cfg.StrOpt('aodh_url', default=None, help=('The endpoint of Aodh to redirect alarms URLs ' 'to Aodh API. Default autodetection by querying ' 'keystone.')), ] cfg.CONF.register_opts(API_OPTS, group='api') cfg.CONF.import_opt('meter_dispatchers', 'ceilometer.dispatcher') LOG = log.getLogger(__name__) def gnocchi_abort(): pecan.abort(410, ("This telemetry installation is configured to use " "Gnocchi. 
Please use the Gnocchi API available on " "the metric endpoint to retrieve data.")) def aodh_abort(): pecan.abort(410, _("alarms URLs are unavailable when Aodh is " "disabled or unavailable.")) def aodh_redirect(url): # NOTE(sileht): we use 307 and not 301 or 302 to allow # client to redirect POST/PUT/DELETE/... # FIXME(sileht): it would be better to use 308, but webob # doesn't handle it :( # https://github.com/Pylons/webob/pull/207 pecan.redirect(location=url + pecan.request.path_qs, code=307) class QueryController(object): def __init__(self, gnocchi_is_enabled=False, aodh_url=None): self.gnocchi_is_enabled = gnocchi_is_enabled self.aodh_url = aodh_url @pecan.expose() def _lookup(self, kind, *remainder): if kind == 'alarms' and self.aodh_url: aodh_redirect(self.aodh_url) elif kind == 'alarms': aodh_abort() elif kind == 'samples' and self.gnocchi_is_enabled: gnocchi_abort() elif kind == 'samples': return query.QuerySamplesController(), remainder else: pecan.abort(404) class V2Controller(object): """Version 2 API controller root.""" event_types = events.EventTypesController() events = events.EventsController() capabilities = capabilities.CapabilitiesController() def __init__(self): self._gnocchi_is_enabled = None self._aodh_is_enabled = None self._aodh_url = None @property def gnocchi_is_enabled(self): if self._gnocchi_is_enabled is None: if cfg.CONF.api.gnocchi_is_enabled is not None: self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled elif ("gnocchi" not in cfg.CONF.meter_dispatchers or "database" in cfg.CONF.meter_dispatchers): self._gnocchi_is_enabled = False else: try: catalog = keystone_client.get_service_catalog( keystone_client.get_client()) catalog.url_for(service_type='metric') except exceptions.EndpointNotFound: self._gnocchi_is_enabled = False except exceptions.ClientException: LOG.warning(_LW("Can't connect to keystone, assuming " "gnocchi is disabled; will retry later")) else: self._gnocchi_is_enabled = True LOG.warning(_LW("ceilometer-api started with gnocchi " "enabled. The resources/meters/samples " "URLs are disabled.")) return self._gnocchi_is_enabled @property def aodh_url(self): if self._aodh_url is None: if cfg.CONF.api.aodh_is_enabled is False: self._aodh_url = "" elif cfg.CONF.api.aodh_url is not None: self._aodh_url = self._normalize_aodh_url( cfg.CONF.api.aodh_url) else: try: catalog = keystone_client.get_service_catalog( keystone_client.get_client()) self._aodh_url = self._normalize_aodh_url( catalog.url_for(service_type='alarming')) except exceptions.EndpointNotFound: self._aodh_url = "" except exceptions.ClientException: LOG.warning(_LW("Can't connect to keystone, assuming aodh " "is disabled; will retry later.")) else: LOG.warning(_LW("ceilometer-api started with aodh " "enabled. 
Alarms URLs will be redirected " "to aodh endpoint.")) return self._aodh_url @pecan.expose() def _lookup(self, kind, *remainder): if (kind in ['meters', 'resources', 'samples'] and self.gnocchi_is_enabled): if kind == 'meters' and pecan.request.method == 'POST': direct = pecan.request.params.get('direct', '') if strutils.bool_from_string(direct): pecan.abort(400, _('direct option cannot be true when ' 'Gnocchi is enabled.')) return meters.MetersController(), remainder gnocchi_abort() elif kind == 'meters': return meters.MetersController(), remainder elif kind == 'resources': return resources.ResourcesController(), remainder elif kind == 'samples': return samples.SamplesController(), remainder elif kind == 'query': return QueryController( gnocchi_is_enabled=self.gnocchi_is_enabled, aodh_url=self.aodh_url, ), remainder elif kind == 'alarms' and (not self.aodh_url): aodh_abort() elif kind == 'alarms' and self.aodh_url: aodh_redirect(self.aodh_url) else: pecan.abort(404) @staticmethod def _normalize_aodh_url(url): if url.endswith("/"): return url[:-1] return url ceilometer-6.1.5/ceilometer/api/controllers/v2/meters.py0000664000567000056710000004421013072744705024475 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import datetime from oslo_config import cfg from oslo_context import context from oslo_log import log from oslo_utils import strutils from oslo_utils import timeutils import pecan from pecan import rest import six import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer.publisher import utils as publisher_utils from ceilometer import sample from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) class OldSample(base.Base): """A single measurement for a given meter and resource. This class is deprecated in favor of Sample. """ source = wtypes.text "The ID of the source that identifies where the sample comes from" counter_name = wsme.wsattr(wtypes.text, mandatory=True) "The name of the meter" # FIXME(dhellmann): Make this meter_name? counter_type = wsme.wsattr(wtypes.text, mandatory=True) "The type of the meter (see :ref:`measurements`)" # FIXME(dhellmann): Make this meter_type? counter_unit = wsme.wsattr(wtypes.text, mandatory=True) "The unit of measure for the value in counter_volume" # FIXME(dhellmann): Make this meter_unit? 
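# For orientation: these deprecated counter_* attributes map onto the
# newer Sample type in samples.py as counter_name -> meter,
# counter_type -> type, counter_unit -> unit and
# counter_volume -> volume (see Sample.from_db_model), which is the
# rename the FIXME notes above are tracking.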
counter_volume = wsme.wsattr(float, mandatory=True) "The actual measured value" user_id = wtypes.text "The ID of the user who last triggered an update to the resource" project_id = wtypes.text "The ID of the project or tenant that owns the resource" resource_id = wsme.wsattr(wtypes.text, mandatory=True) "The ID of the :class:`Resource` for which the measurements are taken" timestamp = datetime.datetime "UTC date and time when the measurement was made" recorded_at = datetime.datetime "When the sample has been recorded." resource_metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the resource" message_id = wtypes.text "A unique identifier for the sample" def __init__(self, counter_volume=None, resource_metadata=None, timestamp=None, **kwds): resource_metadata = resource_metadata or {} if counter_volume is not None: counter_volume = float(counter_volume) resource_metadata = v2_utils.flatten_metadata(resource_metadata) # this is to make it easier for clients to pass a timestamp in if timestamp and isinstance(timestamp, six.string_types): timestamp = timeutils.parse_isotime(timestamp) super(OldSample, self).__init__(counter_volume=counter_volume, resource_metadata=resource_metadata, timestamp=timestamp, **kwds) if self.resource_metadata in (wtypes.Unset, None): self.resource_metadata = {} @classmethod def sample(cls): return cls(source='openstack', counter_name='instance', counter_type='gauge', counter_unit='instance', counter_volume=1, resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', recorded_at=datetime.datetime.utcnow(), timestamp=datetime.datetime.utcnow(), resource_metadata={'name1': 'value1', 'name2': 'value2'}, message_id='5460acce-4fd6-480d-ab18-9735ec7b1996', ) class Statistics(base.Base): """Computed statistics for a query.""" groupby = {wtypes.text: wtypes.text} "Dictionary of field names for group, if groupby statistics are requested" unit = wtypes.text "The unit type of the data set" min = float "The minimum volume seen in the data" max = float "The maximum volume seen in the data" avg = float "The average of all of the volume values seen in the data" sum = float "The total of all of the volume values seen in the data" count = int "The number of samples seen" aggregate = {wtypes.text: float} "The selectable aggregate value(s)" duration = float "The difference, in seconds, between the oldest and newest timestamp" duration_start = datetime.datetime "UTC date and time of the earliest timestamp, or the query start time" duration_end = datetime.datetime "UTC date and time of the latest timestamp, or the query end time" period = int "The difference, in seconds, between the period start and end" period_start = datetime.datetime "UTC date and time of the period start" period_end = datetime.datetime "UTC date and time of the period end" def __init__(self, start_timestamp=None, end_timestamp=None, **kwds): super(Statistics, self).__init__(**kwds) self._update_duration(start_timestamp, end_timestamp) def _update_duration(self, start_timestamp, end_timestamp): # "Clamp" the timestamps we return to the original time # range, excluding the offset.
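# Worked example with illustrative values: if the query asked for
# 16:00 <= timestamp < 17:00 but the matching samples actually span
# 15:58..16:47, duration_start is clamped up from 15:58 to 16:00 while
# duration_end stays at 16:47, so duration = 47 * 60 = 2820 seconds.
# If clamping inverts the pair (start > end), all three fields are
# reset to None below.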
if (start_timestamp and self.duration_start and self.duration_start < start_timestamp): self.duration_start = start_timestamp LOG.debug('clamping min timestamp to range') if (end_timestamp and self.duration_end and self.duration_end > end_timestamp): self.duration_end = end_timestamp LOG.debug('clamping max timestamp to range') # If we got valid timestamps back, compute a duration in seconds. # # If the min > max after clamping then we know the # timestamps on the samples fell outside of the time # range we care about for the query, so treat them as # "invalid." # # If the timestamps are invalid, return None as a # sentinel indicating that there is something "funny" # about the range. if (self.duration_start and self.duration_end and self.duration_start <= self.duration_end): self.duration = timeutils.delta_seconds(self.duration_start, self.duration_end) else: self.duration_start = self.duration_end = self.duration = None @classmethod def sample(cls): return cls(unit='GiB', min=1, max=9, avg=4.5, sum=45, count=10, duration_start=datetime.datetime(2013, 1, 4, 16, 42), duration_end=datetime.datetime(2013, 1, 4, 16, 47), period=7200, period_start=datetime.datetime(2013, 1, 4, 16, 00), period_end=datetime.datetime(2013, 1, 4, 18, 00), ) class Aggregate(base.Base): func = wsme.wsattr(wtypes.text, mandatory=True) "The aggregation function name" param = wsme.wsattr(wtypes.text, default=None) "The parameter to the aggregation function" def __init__(self, **kwargs): super(Aggregate, self).__init__(**kwargs) @staticmethod def validate(aggregate): return aggregate @classmethod def sample(cls): return cls(func='cardinality', param='resource_id') def _validate_groupby_fields(groupby_fields): """Checks that the list of groupby fields from request is valid. If all fields are valid, returns fields with duplicates removed. """ # NOTE(terriyu): Currently, metadata fields are supported in our # group by statistics implementation only for mongodb valid_fields = set(['user_id', 'resource_id', 'project_id', 'source', 'resource_metadata.instance_type']) invalid_fields = set(groupby_fields) - valid_fields if invalid_fields: raise wsme.exc.UnknownArgument(invalid_fields, "Invalid groupby fields") # Remove duplicate fields # NOTE(terriyu): This assumes that we don't care about the order of the # group by fields. return list(set(groupby_fields)) class MeterController(rest.RestController): """Manages operations on a single meter.""" _custom_actions = { 'statistics': ['GET'], } def __init__(self, meter_name): pecan.request.context['meter_name'] = meter_name self.meter_name = meter_name @wsme_pecan.wsexpose([OldSample], [base.Query], int) def get_all(self, q=None, limit=None): """Return samples for the meter. :param q: Filter rules for the data to be returned. :param limit: Maximum number of samples to return. """ rbac.enforce('get_samples', pecan.request) q = q or [] limit = v2_utils.enforce_limit(limit) kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) kwargs['meter'] = self.meter_name f = storage.SampleFilter(**kwargs) return [OldSample.from_db_model(e) for e in pecan.request.storage_conn.get_samples(f, limit=limit) ] @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201) def post(self, direct='', samples=None): """Post a list of new Samples to Telemetry. :param direct: a flag indicating whether the samples are posted directly to storage or published via the notifier. :param samples: a list of samples within the request body.
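A minimal, hand-written example body, giving only the mandatory fields (all values are illustrative):

    [{"counter_name": "instance",
      "counter_type": "gauge",
      "counter_unit": "instance",
      "counter_volume": 1,
      "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36"}]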
""" rbac.enforce('create_samples', pecan.request) direct = strutils.bool_from_string(direct) if not samples: msg = _('Samples should be included in request body') raise base.ClientSideError(msg) now = timeutils.utcnow() auth_project = rbac.get_limited_to_project(pecan.request.headers) def_source = pecan.request.cfg.sample_source def_project_id = pecan.request.headers.get('X-Project-Id') def_user_id = pecan.request.headers.get('X-User-Id') published_samples = [] for s in samples: if self.meter_name != s.counter_name: raise wsme.exc.InvalidInput('counter_name', s.counter_name, 'should be %s' % self.meter_name) if s.message_id: raise wsme.exc.InvalidInput('message_id', s.message_id, 'The message_id must not be set') if s.counter_type not in sample.TYPES: raise wsme.exc.InvalidInput('counter_type', s.counter_type, 'The counter type must be: ' + ', '.join(sample.TYPES)) s.user_id = (s.user_id or def_user_id) s.project_id = (s.project_id or def_project_id) s.source = '%s:%s' % (s.project_id, (s.source or def_source)) s.timestamp = (s.timestamp or now) if auth_project and auth_project != s.project_id: # non admin user trying to cross post to another project_id auth_msg = 'can not post samples to other projects' raise wsme.exc.InvalidInput('project_id', s.project_id, auth_msg) published_sample = sample.Sample( name=s.counter_name, type=s.counter_type, unit=s.counter_unit, volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp.isoformat(), resource_metadata=utils.restore_nesting(s.resource_metadata, separator='.'), source=s.source) s.message_id = published_sample.id sample_dict = publisher_utils.meter_message_from_counter( published_sample, cfg.CONF.publisher.telemetry_secret) if direct: ts = timeutils.parse_isotime(sample_dict['timestamp']) sample_dict['timestamp'] = timeutils.normalize_time(ts) pecan.request.storage_conn.record_metering_data(sample_dict) else: published_samples.append(sample_dict) if not direct: ctxt = context.RequestContext(user=def_user_id, tenant=def_project_id, is_admin=True) notifier = pecan.request.notifier notifier.sample(ctxt.to_dict(), 'telemetry.api', {'samples': published_samples}) return samples @wsme_pecan.wsexpose([Statistics], [base.Query], [six.text_type], int, [Aggregate]) def statistics(self, q=None, groupby=None, period=None, aggregate=None): """Computes the statistics of the samples in the time range given. :param q: Filter rules for the data to be returned. :param groupby: Fields for group by aggregation :param period: Returned result will be an array of statistics for a period long of that number of seconds. :param aggregate: The selectable aggregation functions to be applied. """ rbac.enforce('compute_statistics', pecan.request) q = q or [] groupby = groupby or [] aggregate = aggregate or [] if period and period < 0: raise base.ClientSideError(_("Period must be positive.")) kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) kwargs['meter'] = self.meter_name f = storage.SampleFilter(**kwargs) g = _validate_groupby_fields(groupby) aggregate = utils.uniq(aggregate, ['func', 'param']) # Find the original timestamp in the query to use for clamping # the duration returned in the statistics. 
start = end = None for i in q: if i.field == 'timestamp' and i.op in ('lt', 'le'): end = timeutils.parse_isotime(i.value).replace( tzinfo=None) elif i.field == 'timestamp' and i.op in ('gt', 'ge'): start = timeutils.parse_isotime(i.value).replace( tzinfo=None) try: computed = pecan.request.storage_conn.get_meter_statistics( f, period, g, aggregate) return [Statistics(start_timestamp=start, end_timestamp=end, **c.as_dict()) for c in computed] except OverflowError as e: params = dict(period=period, err=e) raise base.ClientSideError( _("Invalid period %(period)s: %(err)s") % params) class Meter(base.Base): """One category of measurements.""" name = wtypes.text "The unique name for the meter" type = wtypes.Enum(str, *sample.TYPES) "The meter type (see :ref:`measurements`)" unit = wtypes.text "The unit of measure" resource_id = wtypes.text "The ID of the :class:`Resource` for which the measurements are taken" project_id = wtypes.text "The ID of the project or tenant that owns the resource" user_id = wtypes.text "The ID of the user who last triggered an update to the resource" source = wtypes.text "The ID of the source that identifies where the meter comes from" meter_id = wtypes.text "The unique identifier for the meter" def __init__(self, **kwargs): meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name']) # meter_id is of type Unicode but base64.encodestring() only accepts # strings. See bug #1333177 meter_id = base64.b64encode(meter_id.encode('utf-8')) kwargs['meter_id'] = meter_id super(Meter, self).__init__(**kwargs) @classmethod def sample(cls): return cls(name='instance', type='gauge', unit='instance', resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', source='openstack', ) class MetersController(rest.RestController): """Works on meters.""" @pecan.expose() def _lookup(self, meter_name, *remainder): return MeterController(meter_name), remainder @wsme_pecan.wsexpose([Meter], [base.Query], int, str) def get_all(self, q=None, limit=None, unique=''): """Return all known meters, based on the data recorded so far. :param q: Filter rules for the meters to be returned. :param unique: flag to indicate unique meters to be returned. """ rbac.enforce('get_meters', pecan.request) q = q or [] # Timestamp field is not supported for Meter queries limit = v2_utils.enforce_limit(limit) kwargs = v2_utils.query_to_kwargs( q, pecan.request.storage_conn.get_meters, ['limit'], allow_timestamps=False) return [Meter.from_db_model(m) for m in pecan.request.storage_conn.get_meters( limit=limit, unique=strutils.bool_from_string(unique), **kwargs)] ceilometer-6.1.5/ceilometer/api/controllers/v2/base.py0000664000567000056710000002067413072744705024120 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import ast import datetime import functools import inspect import json from oslo_utils import strutils from oslo_utils import timeutils import pecan import six import wsme from wsme import types as wtypes from ceilometer.i18n import _ operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') operation_kind_enum = wtypes.Enum(str, *operation_kind) class ClientSideError(wsme.exc.ClientSideError): def __init__(self, error, status_code=400): pecan.response.translatable_error = error super(ClientSideError, self).__init__(error, status_code) class EntityNotFound(ClientSideError): def __init__(self, entity, id): super(EntityNotFound, self).__init__( _("%(entity)s %(id)s Not Found") % {'entity': entity, 'id': id}, status_code=404) class ProjectNotAuthorized(ClientSideError): def __init__(self, id, aspect='project'): params = dict(aspect=aspect, id=id) super(ProjectNotAuthorized, self).__init__( _("Not Authorized to access %(aspect)s %(id)s") % params, status_code=401) class AdvEnum(wtypes.wsproperty): """Handle default and mandatory for wtypes.Enum.""" def __init__(self, name, *args, **kwargs): self._name = '_advenum_%s' % name self._default = kwargs.pop('default', None) mandatory = kwargs.pop('mandatory', False) enum = wtypes.Enum(*args, **kwargs) super(AdvEnum, self).__init__(datatype=enum, fget=self._get, fset=self._set, mandatory=mandatory) def _get(self, parent): if hasattr(parent, self._name): value = getattr(parent, self._name) return value or self._default return self._default def _set(self, parent, value): try: if self.datatype.validate(value): setattr(parent, self._name, value) except ValueError as e: raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), value, e) class Base(wtypes.DynamicBase): @classmethod def from_db_model(cls, m): return cls(**(m.as_dict())) @classmethod def from_db_and_links(cls, m, links): return cls(links=links, **(m.as_dict())) def as_dict(self, db_model): valid_keys = inspect.getargspec(db_model.__init__)[0] if 'self' in valid_keys: valid_keys.remove('self') return self.as_dict_from_keys(valid_keys) def as_dict_from_keys(self, keys): return dict((k, getattr(self, k)) for k in keys if hasattr(self, k) and getattr(self, k) != wsme.Unset) class Link(Base): """A link representation.""" href = wtypes.text "The url of a link" rel = wtypes.text "The name of a link" @classmethod def sample(cls): return cls(href=('http://localhost:8777/v2/meters/volume?' 'q.field=resource_id&' 'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='volume' ) class Query(Base): """Query filter.""" # The data types supported by the query. _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] # Functions to convert the data field to the correct type. _type_converters = {'integer': int, 'float': float, 'boolean': functools.partial( strutils.bool_from_string, strict=True), 'string': six.text_type, 'datetime': timeutils.parse_isotime} _op = None # provide a default def get_op(self): return self._op or 'eq' def set_op(self, value): self._op = value field = wsme.wsattr(wtypes.text, mandatory=True) "The name of the field to test" # op = wsme.wsattr(operation_kind, default='eq') # this ^ doesn't seem to work. op = wsme.wsproperty(operation_kind_enum, get_op, set_op) "The comparison operator. Defaults to 'eq'." 
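# Usage note (illustrative, not part of the WSME definition): a request
# such as GET /v2/meters/cpu_util?q.field=resource_id&q.op=eq&q.value=...
# deserializes into one Query instance with field='resource_id',
# op='eq' and value='...'. When q.op is omitted, get_op() above falls
# back to 'eq'.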
value = wsme.wsattr(wtypes.text, mandatory=True) "The value to compare against the stored data" type = wtypes.text "The data type of value to compare against the stored data" def __repr__(self): # for logging calls return '<Query %r %s %r %s>' % (self.field, self.op, self.value, self.type) @classmethod def sample(cls): return cls(field='resource_id', op='eq', value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', type='string' ) def as_dict(self): return self.as_dict_from_keys(['field', 'op', 'type', 'value']) def _get_value_as_type(self, forced_type=None): """Convert metadata value to the specified data type. This method is called during metadata query to help convert the querying metadata to the data type specified by user. If there is no data type given, the metadata will be parsed by ast.literal_eval to try to do a smart converting. NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised from wsmeext/sphinxext.py. It's OK to call it outside the Query class. Because the "public" side of that class is actually the outside of the API, and the "private" side is the API implementation. The method is only used in the API implementation, so it's OK. :returns: metadata value converted with the specified data type. """ type = forced_type or self.type try: converted_value = self.value if not type: try: converted_value = ast.literal_eval(self.value) except (ValueError, SyntaxError): # Unable to convert the metadata value automatically # let it default to self.value pass else: if type not in self._supported_types: # Types must be explicitly declared so the # correct type converter may be used. Subclasses # of Query may define _supported_types and # _type_converters to define their own types. raise TypeError() converted_value = self._type_converters[type](self.value) if isinstance(converted_value, datetime.datetime): converted_value = timeutils.normalize_time(converted_value) except ValueError: msg = (_('Unable to convert the value %(value)s' ' to the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) except TypeError: msg = (_('The data type %(type)s is not supported. The supported' ' data type list is: %(supported)s') % {'type': type, 'supported': self._supported_types}) raise ClientSideError(msg) except Exception: msg = (_('Unexpected exception converting %(value)s to' ' the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) return converted_value class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' @staticmethod def validate(value): # check that value can be serialised json.dumps(value) return value ceilometer-6.1.5/ceilometer/api/controllers/v2/samples.py0000664000567000056710000001104113072744705024636 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer import sample from ceilometer import storage class Sample(base.Base): """One measurement.""" id = wtypes.text "The unique identifier for the sample." meter = wtypes.text "The meter name this sample is for." type = wtypes.Enum(str, *sample.TYPES) "The meter type (see :ref:`meter_types`)" unit = wtypes.text "The unit of measure." volume = float "The metered value." user_id = wtypes.text "The user this sample was taken for." project_id = wtypes.text "The project this sample was taken for." resource_id = wtypes.text "The :class:`Resource` this sample was taken for." source = wtypes.text "The source that identifies where the sample comes from." timestamp = datetime.datetime "When the sample has been generated." recorded_at = datetime.datetime "When the sample has been recorded." metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the sample." @classmethod def from_db_model(cls, m): return cls(id=m.message_id, meter=m.counter_name, type=m.counter_type, unit=m.counter_unit, volume=m.counter_volume, user_id=m.user_id, project_id=m.project_id, resource_id=m.resource_id, source=m.source, timestamp=m.timestamp, recorded_at=m.recorded_at, metadata=utils.flatten_metadata(m.resource_metadata)) @classmethod def sample(cls): return cls(id=str(uuid.uuid1()), meter='instance', type='gauge', unit='instance', volume=1, resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', timestamp=timeutils.utcnow(), recorded_at=datetime.datetime.utcnow(), source='openstack', metadata={'name1': 'value1', 'name2': 'value2'}, ) class SamplesController(rest.RestController): """Controller managing the samples.""" @wsme_pecan.wsexpose([Sample], [base.Query], int) def get_all(self, q=None, limit=None): """Return all known samples, based on the data recorded so far. :param q: Filter rules for the samples to be returned. :param limit: Maximum number of samples to be returned. """ rbac.enforce('get_samples', pecan.request) q = q or [] limit = utils.enforce_limit(limit) kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) f = storage.SampleFilter(**kwargs) return map(Sample.from_db_model, pecan.request.storage_conn.get_samples(f, limit=limit)) @wsme_pecan.wsexpose(Sample, wtypes.text) def get_one(self, sample_id): """Return a sample. :param sample_id: the id of the sample. """ rbac.enforce('get_sample', pecan.request) f = storage.SampleFilter(message_id=sample_id) samples = list(pecan.request.storage_conn.get_samples(f)) if len(samples) < 1: raise base.EntityNotFound(_('Sample'), sample_id) return Sample.from_db_model(samples[0]) ceilometer-6.1.5/ceilometer/api/controllers/v2/resources.py0000664000567000056710000001335113072744705025212 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import pecan from pecan import rest import six from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils from ceilometer.api import rbac from ceilometer.i18n import _ class Resource(base.Base): """An externally defined object for which samples have been received.""" resource_id = wtypes.text "The unique identifier for the resource" project_id = wtypes.text "The ID of the owning project or tenant" user_id = wtypes.text "The ID of the user who created the resource or updated it last" first_sample_timestamp = datetime.datetime "UTC date & time not later than the first sample known for this resource" last_sample_timestamp = datetime.datetime "UTC date & time not earlier than the last sample known for this resource" metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the resource" links = [base.Link] "A list containing a self link and associated meter links" source = wtypes.text "The source where the resource come from" def __init__(self, metadata=None, **kwds): metadata = metadata or {} metadata = utils.flatten_metadata(metadata) super(Resource, self).__init__(metadata=metadata, **kwds) @classmethod def sample(cls): return cls( resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', timestamp=datetime.datetime.utcnow(), source="openstack", metadata={'name1': 'value1', 'name2': 'value2'}, links=[ base.Link(href=('http://localhost:8777/v2/resources/' 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='self'), base.Link(href=('http://localhost:8777/v2/meters/volume?' 'q.field=resource_id&q.value=' 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='volume') ], ) class ResourcesController(rest.RestController): """Works on resources.""" @staticmethod def _make_link(rel_name, url, type, type_arg, query=None): query_str = '' if query: query_str = '?q.field=%s&q.value=%s' % (query['field'], query['value']) return base.Link(href='%s/v2/%s/%s%s' % (url, type, type_arg, query_str), rel=rel_name) def _resource_links(self, resource_id, meter_links=1): links = [self._make_link('self', pecan.request.application_url, 'resources', resource_id)] if meter_links: for meter in pecan.request.storage_conn.get_meters( resource=resource_id): query = {'field': 'resource_id', 'value': resource_id} links.append(self._make_link(meter.name, pecan.request.application_url, 'meters', meter.name, query=query)) return links @wsme_pecan.wsexpose(Resource, six.text_type) def get_one(self, resource_id): """Retrieve details about one resource. :param resource_id: The UUID of the resource. 
""" rbac.enforce('get_resource', pecan.request) authorized_project = rbac.get_limited_to_project(pecan.request.headers) resources = list(pecan.request.storage_conn.get_resources( resource=resource_id, project=authorized_project)) if not resources: raise base.EntityNotFound(_('Resource'), resource_id) return Resource.from_db_and_links(resources[0], self._resource_links(resource_id)) @wsme_pecan.wsexpose([Resource], [base.Query], int, int) def get_all(self, q=None, limit=None, meter_links=1): """Retrieve definitions of all of the resources. :param q: Filter rules for the resources to be returned. :param meter_links: option to include related meter links """ rbac.enforce('get_resources', pecan.request) q = q or [] limit = utils.enforce_limit(limit) kwargs = utils.query_to_kwargs( q, pecan.request.storage_conn.get_resources, ['limit']) resources = [ Resource.from_db_and_links(r, self._resource_links(r.resource_id, meter_links)) for r in pecan.request.storage_conn.get_resources(limit=limit, **kwargs)] return resources ceilometer-6.1.5/ceilometer/api/controllers/v2/events.py0000664000567000056710000002634213072744705024510 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_log import log import pecan from pecan import rest import six import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.event.storage import models as event_models from ceilometer.i18n import _ from ceilometer import storage LOG = log.getLogger(__name__) class TraitDescription(base.Base): """A description of a trait, with no associated value.""" type = wtypes.text "the data type, defaults to string" name = wtypes.text "the name of the trait" @classmethod def sample(cls): return cls(name='service', type='string' ) class EventQuery(base.Query): """Query arguments for Event Queries.""" _supported_types = ['integer', 'float', 'string', 'datetime'] type = wsme.wsattr(wtypes.text, default='string') "the type of the trait filter, defaults to string" def __repr__(self): # for logging calls return '' % (self.field, self.op, self._get_value_as_type(), self.type) @classmethod def sample(cls): return cls(field="event_type", type="string", op="eq", value="compute.instance.create.start") class Trait(base.Base): """A Trait associated with an event.""" name = wtypes.text "The name of the trait" value = wtypes.text "the value of the trait" type = wtypes.text "the type of the trait (string, integer, float or datetime)" @staticmethod def _convert_storage_trait(trait): """Helper method to convert a storage model into an API trait instance. 
If an API trait instance is passed in, just return it. """ if isinstance(trait, Trait): return trait value = (six.text_type(trait.value) if not trait.dtype == event_models.Trait.DATETIME_TYPE else trait.value.isoformat()) trait_type = event_models.Trait.get_name_by_type(trait.dtype) return Trait(name=trait.name, type=trait_type, value=value) @classmethod def sample(cls): return cls(name='service', type='string', value='compute.hostname' ) class Event(base.Base): """A System event.""" message_id = wtypes.text "The message ID for the notification" event_type = wtypes.text "The type of the event" _traits = None def get_traits(self): return self._traits def set_traits(self, traits): self._traits = map(Trait._convert_storage_trait, traits) traits = wsme.wsproperty(wtypes.ArrayType(Trait), get_traits, set_traits) "Event specific properties" generated = datetime.datetime "The time the event occurred" raw = base.JsonType() "The raw copy of notification" @classmethod def sample(cls): return cls( event_type='compute.instance.update', generated=datetime.datetime(2015, 1, 1, 12, 30, 59, 123456), message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0', traits={ Trait(name='request_id', value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'), Trait(name='service', value='conductor.tem-devstack-01'), Trait(name='tenant_id', value='7f13f2b17917463b9ee21aa92c4b36d6') }, raw={'status': {'nested': 'started'}} ) def _build_rbac_query_filters(): filters = {'t_filter': [], 'admin_proj': None} # Returns user_id, proj_id for non-admins user_id, proj_id = rbac.get_limited_to(pecan.request.headers) # If non-admin, filter events by user and project if user_id and proj_id: filters['t_filter'].append({"key": "project_id", "string": proj_id, "op": "eq"}) filters['t_filter'].append({"key": "user_id", "string": user_id, "op": "eq"}) elif not user_id and not proj_id: filters['admin_proj'] = pecan.request.headers.get('X-Project-Id') return filters def _event_query_to_event_filter(q): evt_model_filter = { 'event_type': None, 'message_id': None, 'start_timestamp': None, 'end_timestamp': None } filters = _build_rbac_query_filters() traits_filter = filters['t_filter'] admin_proj = filters['admin_proj'] for i in q: if not i.op: i.op = 'eq' elif i.op not in base.operation_kind: error = (_('Operator %(operator)s is not supported. The supported' ' operators are: %(supported)s') % {'operator': i.op, 'supported': base.operation_kind}) raise base.ClientSideError(error) if i.field in evt_model_filter: if i.op != 'eq' and i.field in ('event_type', 'message_id'): error = (_('Operator %(operator)s is not supported. Only' ' `eq\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) if i.op != 'ge' and i.field == 'start_timestamp': error = (_('Operator %(operator)s is not supported. Only' ' `ge\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) if i.op != 'le' and i.field == 'end_timestamp': error = (_('Operator %(operator)s is not supported. 
Only' ' `le\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) evt_model_filter[i.field] = i.value else: trait_type = i.type or 'string' traits_filter.append({"key": i.field, trait_type: i._get_value_as_type(), "op": i.op}) return storage.EventFilter(traits_filter=traits_filter, admin_proj=admin_proj, **evt_model_filter) class TraitsController(rest.RestController): """Works on Event Traits.""" @v2_utils.requires_admin @wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text) def get_one(self, event_type, trait_name): """Return all instances of a trait for an event type. :param event_type: Event type to filter traits by :param trait_name: Trait to return values for """ LOG.debug("Getting traits for %s", event_type) return [Trait._convert_storage_trait(t) for t in pecan.request.event_storage_conn .get_traits(event_type, trait_name)] @v2_utils.requires_admin @wsme_pecan.wsexpose([TraitDescription], wtypes.text) def get_all(self, event_type): """Return all trait names for an event type. :param event_type: Event type to filter traits by """ get_trait_name = event_models.Trait.get_name_by_type return [TraitDescription(name=t['name'], type=get_trait_name(t['data_type'])) for t in pecan.request.event_storage_conn .get_trait_types(event_type)] class EventTypesController(rest.RestController): """Works on Event Types in the system.""" traits = TraitsController() @v2_utils.requires_admin @wsme_pecan.wsexpose(None, wtypes.text) def get_one(self, event_type): """Unused API, will always return 404. :param event_type: An event type """ pecan.abort(404) @v2_utils.requires_admin @wsme_pecan.wsexpose([six.text_type]) def get_all(self): """Get all event types.""" return list(pecan.request.event_storage_conn.get_event_types()) class EventsController(rest.RestController): """Works on Events.""" @v2_utils.requires_context @wsme_pecan.wsexpose([Event], [EventQuery], int) def get_all(self, q=None, limit=None): """Return all events matching the query filters. :param q: Filter arguments for which Events to return :param limit: Maximum number of events to be returned. """ rbac.enforce("events:index", pecan.request) q = q or [] limit = v2_utils.enforce_limit(limit) event_filter = _event_query_to_event_filter(q) return [Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) for event in pecan.request.event_storage_conn.get_events(event_filter, limit)] @v2_utils.requires_context @wsme_pecan.wsexpose(Event, wtypes.text) def get_one(self, message_id): """Return a single event with the given message id.
:param message_id: Message ID of the Event to be returned """ rbac.enforce("events:show", pecan.request) filters = _build_rbac_query_filters() t_filter = filters['t_filter'] admin_proj = filters['admin_proj'] event_filter = storage.EventFilter(traits_filter=t_filter, admin_proj=admin_proj, message_id=message_id) events = [event for event in pecan.request.event_storage_conn.get_events(event_filter)] if not events: raise base.EntityNotFound(_("Event"), message_id) if len(events) > 1: LOG.error(_("More than one event with " "id %s returned from storage driver") % message_id) event = events[0] return Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) ceilometer-6.1.5/ceilometer/api/controllers/v2/query.py0000664000567000056710000003273213072744705024351 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import jsonschema from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import samples from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) class ComplexQuery(base.Base): """Holds a sample query encoded in json.""" filter = wtypes.text "The filter expression encoded in json." orderby = wtypes.text "List of single-element dicts for specifying the ordering of the results." limit = int "The maximum number of results to be returned." 
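# An illustrative, hand-written request using this type, POSTed to
# /v2/query/samples (note that filter and orderby are JSON documents
# passed as strings; the sample() classmethod just below shows a
# richer filter):
#
#     {"filter": "{\"=\": {\"counter_name\": \"cpu_util\"}}",
#      "orderby": "[{\"timestamp\": \"DESC\"}]",
#      "limit": 10}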
@classmethod def sample(cls): return cls(filter='{"and": [{"and": [{"=": ' + '{"counter_name": "cpu_util"}}, ' + '{">": {"counter_volume": 0.23}}, ' + '{"<": {"counter_volume": 0.26}}]}, ' + '{"or": [{"and": [{">": ' + '{"timestamp": "2013-12-01T18:00:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + '{"and": [{">": ' + '{"timestamp": "2013-12-01T18:30:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', orderby='[{"counter_volume": "ASC"}, ' + '{"timestamp": "DESC"}]', limit=42 ) def _list_to_regexp(items, regexp_prefix=""): regexp = ["^%s$" % item for item in items] regexp = regexp_prefix + "|".join(regexp) return regexp class ValidatedComplexQuery(object): complex_operators = ["and", "or"] order_directions = ["asc", "desc"] simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] regexp_prefix = "(?i)" complex_ops = _list_to_regexp(complex_operators, regexp_prefix) simple_ops = _list_to_regexp(simple_ops, regexp_prefix) order_directions = _list_to_regexp(order_directions, regexp_prefix) timestamp_fields = ["timestamp", "state_timestamp"] def __init__(self, query, db_model, additional_name_mapping=None, metadata_allowed=False): additional_name_mapping = additional_name_mapping or {} self.name_mapping = {"user": "user_id", "project": "project_id"} self.name_mapping.update(additional_name_mapping) valid_keys = db_model.get_field_names() valid_keys = list(valid_keys) + list(self.name_mapping.keys()) valid_fields = _list_to_regexp(valid_keys) if metadata_allowed: valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" else: valid_filter_fields = valid_fields schema_value = { "oneOf": [{"type": "string"}, {"type": "number"}, {"type": "boolean"}], "minProperties": 1, "maxProperties": 1} schema_value_in = { "type": "array", "items": {"oneOf": [{"type": "string"}, {"type": "number"}]}, "minItems": 1} schema_field = { "type": "object", "patternProperties": {valid_filter_fields: schema_value}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_field_in = { "type": "object", "patternProperties": {valid_filter_fields: schema_value_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_in = { "type": "object", "patternProperties": {"(?i)^in$": schema_field_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_simple_ops = { "type": "object", "patternProperties": {self.simple_ops: schema_field}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_and_or_array = { "type": "array", "items": {"$ref": "#"}, "minItems": 2} schema_and_or = { "type": "object", "patternProperties": {self.complex_ops: schema_and_or_array}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_not = { "type": "object", "patternProperties": {"(?i)^not$": {"$ref": "#"}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} self.schema = { "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, {"$ref": "#/definitions/leaf_in"}, {"$ref": "#/definitions/and_or"}, {"$ref": "#/definitions/not"}], "minProperties": 1, "maxProperties": 1, "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, "leaf_in": schema_leaf_in, "and_or": schema_and_or, "not": schema_not}} self.orderby_schema = { "type": "array", "items": { "type": "object", "patternProperties": {valid_fields: {"type": "string", "pattern": self.order_directions}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1}} self.original_query = query def 
validate(self, visibility_field): """Validates the query content and does the necessary conversions.""" if self.original_query.filter is wtypes.Unset: self.filter_expr = None else: try: self.filter_expr = json.loads(self.original_query.filter) self._validate_filter(self.filter_expr) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Filter expression not valid: %s") % e) self._replace_isotime_with_datetime(self.filter_expr) self._convert_operator_to_lower_case(self.filter_expr) self._normalize_field_names_for_db_model(self.filter_expr) self._force_visibility(visibility_field) if self.original_query.orderby is wtypes.Unset: self.orderby = None else: try: self.orderby = json.loads(self.original_query.orderby) self._validate_orderby(self.orderby) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Order-by expression not valid: %s") % e) self._convert_orderby_to_lower_case(self.orderby) self._normalize_field_names_in_orderby(self.orderby) self.limit = (None if self.original_query.limit is wtypes.Unset else self.original_query.limit) self.limit = v2_utils.enforce_limit(self.limit) @staticmethod def _convert_orderby_to_lower_case(orderby): for orderby_field in orderby: utils.lowercase_values(orderby_field) def _normalize_field_names_in_orderby(self, orderby): for orderby_field in orderby: self._replace_field_names(orderby_field) def _traverse_postorder(self, tree, visitor): op = list(tree.keys())[0] if op.lower() in self.complex_operators: for i, operand in enumerate(tree[op]): self._traverse_postorder(operand, visitor) if op.lower() == "not": self._traverse_postorder(tree[op], visitor) visitor(tree) def _check_cross_project_references(self, own_project_id, visibility_field): """Do not allow other than own_project_id.""" def check_project_id(subfilter): op, value = list(subfilter.items())[0] if (op.lower() not in self.complex_operators and list(value.keys())[0] == visibility_field and value[visibility_field] != own_project_id): raise base.ProjectNotAuthorized(value[visibility_field]) self._traverse_postorder(self.filter_expr, check_project_id) def _force_visibility(self, visibility_field): """Force visibility field. If the tenant is not admin, insert an extra "and <visibility_field>=<own_project_id>" clause to the query.
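For example (illustrative values): with visibility_field "project_id" and an authorized project "p1", a filter F is rewritten by _restrict_to_project below as {"and": [{"=": {"project_id": "p1"}}, F]}.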
""" authorized_project = rbac.get_limited_to_project(pecan.request.headers) is_admin = authorized_project is None if not is_admin: self._restrict_to_project(authorized_project, visibility_field) self._check_cross_project_references(authorized_project, visibility_field) def _restrict_to_project(self, project_id, visibility_field): restriction = {"=": {visibility_field: project_id}} if self.filter_expr is None: self.filter_expr = restriction else: self.filter_expr = {"and": [restriction, self.filter_expr]} def _replace_isotime_with_datetime(self, filter_expr): def replace_isotime(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: field = list(value.keys())[0] if field in self.timestamp_fields: date_time = self._convert_to_datetime(subfilter[op][field]) subfilter[op][field] = date_time self._traverse_postorder(filter_expr, replace_isotime) def _normalize_field_names_for_db_model(self, filter_expr): def _normalize_field_names(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: self._replace_field_names(value) self._traverse_postorder(filter_expr, _normalize_field_names) def _replace_field_names(self, subfilter): field, value = list(subfilter.items())[0] if field in self.name_mapping: del subfilter[field] subfilter[self.name_mapping[field]] = value if field.startswith("metadata."): del subfilter[field] subfilter["resource_" + field] = value def _convert_operator_to_lower_case(self, filter_expr): self._traverse_postorder(filter_expr, utils.lowercase_keys) @staticmethod def _convert_to_datetime(isotime): try: date_time = timeutils.parse_isotime(isotime) date_time = date_time.replace(tzinfo=None) return date_time except ValueError: LOG.exception(_("String %s is not a valid isotime") % isotime) msg = _('Failed to parse the timestamp value %s') % isotime raise base.ClientSideError(msg) def _validate_filter(self, filter_expr): jsonschema.validate(filter_expr, self.schema) def _validate_orderby(self, orderby_expr): jsonschema.validate(orderby_expr, self.orderby_schema) class QuerySamplesController(rest.RestController): """Provides complex query possibilities for samples.""" @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery) def post(self, body): """Define query for retrieving Sample data. :param body: Query rules for the samples to be returned. """ rbac.enforce('query_sample', pecan.request) sample_name_mapping = {"resource": "resource_id", "meter": "counter_name", "type": "counter_type", "unit": "counter_unit", "volume": "counter_volume"} query = ValidatedComplexQuery(body, storage.models.Sample, sample_name_mapping, metadata_allowed=True) query.validate(visibility_field="project_id") conn = pecan.request.storage_conn return [samples.Sample.from_db_model(s) for s in conn.query_samples(query.filter_expr, query.orderby, query.limit)] class QueryController(rest.RestController): samples = QuerySamplesController() ceilometer-6.1.5/ceilometer/api/controllers/v2/utils.py0000664000567000056710000003357413072744705024351 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import functools import inspect from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pecan import six import wsme from ceilometer.api.controllers.v2 import base from ceilometer.api import rbac from ceilometer.i18n import _, _LI from ceilometer import utils LOG = log.getLogger(__name__) cfg.CONF.import_opt('default_api_return_limit', 'ceilometer.api.app', group='api') def enforce_limit(limit): """Ensure limit is defined and valid. If not, set a default.""" if limit is None: limit = cfg.CONF.api.default_api_return_limit LOG.info(_LI('No limit value provided, result set will be' ' limited to %(limit)d.'), {'limit': limit}) if not limit or limit <= 0: raise base.ClientSideError(_("Limit must be positive")) return limit def get_auth_project(on_behalf_of=None): auth_project = rbac.get_limited_to_project(pecan.request.headers) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project def sanitize_query(query, db_func, on_behalf_of=None): """Check the query. See if: 1) the request is coming from admin - then allow full visibility 2) non-admin - make sure that the query includes the requester's project. """ q = copy.copy(query) auth_project = get_auth_project(on_behalf_of) if auth_project: _verify_query_segregation(q, auth_project) proj_q = [i for i in q if i.field == 'project_id'] valid_keys = inspect.getargspec(db_func)[0] if not proj_q and 'on_behalf_of' not in valid_keys: # The user is restricted, but they didn't specify a project # so add it for them. q.append(base.Query(field='project_id', op='eq', value=auth_project)) return q def _verify_query_segregation(query, auth_project=None): """Ensure non-admin queries are not constrained to another project.""" auth_project = (auth_project or rbac.get_limited_to_project(pecan.request.headers)) if not auth_project: return for q in query: if q.field in ('project', 'project_id') and auth_project != q.value: raise base.ProjectNotAuthorized(q.value) def validate_query(query, db_func, internal_keys=None, allow_timestamps=True): """Validates the syntax of the query and verifies the query. Verification checks whether the query request is authorized for the included project.
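For example (an illustrative sketch; the storage connection object and its get_resources method are only assumed here for the sake of the example), a simple equality query could be validated with: query = [base.Query(field='resource_id', op='eq', value='resource-1')] validate_query(query, storage_conn.get_resources) A field that is neither a timestamp field nor among the target function's arguments raises UnknownArgument.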
:param query: Query expression that should be validated :param db_func: the function on the storage level, of which arguments will form the valid_keys list, which defines the valid fields for a query expression :param internal_keys: internally used field names, that should not be used for querying :param allow_timestamps: defines whether the timestamp-based constraint is applicable for this query or not :raises InvalidInput: if an operator is not supported for a given field :raises InvalidInput: if timestamp constraints are allowed, but search_offset was included without timestamp constraint :raises: UnknownArgument: if a field name is not a timestamp field, nor in the list of valid keys """ internal_keys = internal_keys or [] _verify_query_segregation(query) valid_keys = inspect.getargspec(db_func)[0] internal_timestamp_keys = ['end_timestamp', 'start_timestamp', 'end_timestamp_op', 'start_timestamp_op'] if 'start_timestamp' in valid_keys: internal_keys += internal_timestamp_keys valid_keys += ['timestamp', 'search_offset'] internal_keys.append('self') internal_keys.append('metaquery') valid_keys = set(valid_keys) - set(internal_keys) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} has_timestamp_query = _validate_timestamp_fields(query, 'timestamp', ('lt', 'le', 'gt', 'ge'), allow_timestamps) has_search_offset_query = _validate_timestamp_fields(query, 'search_offset', 'eq', allow_timestamps) if has_search_offset_query and not has_timestamp_query: raise wsme.exc.InvalidInput('field', 'search_offset', "search_offset cannot be used without " + "timestamp") def _is_field_metadata(field): return (field.startswith('metadata.') or field.startswith('resource_metadata.')) for i in query: if i.field not in ('timestamp', 'search_offset'): key = translation.get(i.field, i.field) operator = i.op if key in valid_keys or _is_field_metadata(i.field): if operator == 'eq': if key == 'enabled': i._get_value_as_type('boolean') elif _is_field_metadata(key): i._get_value_as_type() else: raise wsme.exc.InvalidInput('op', i.op, 'unimplemented operator for ' '%s' % i.field) else: msg = ("unrecognized field in query: %s, " "valid keys: %s") % (query, sorted(valid_keys)) raise wsme.exc.UnknownArgument(key, msg) def _validate_timestamp_fields(query, field_name, operator_list, allow_timestamps): """Validates the timestamp related constraints in a query if there are any. :param query: query expression that may contain the timestamp fields :param field_name: timestamp name, which should be checked (timestamp, search_offset) :param operator_list: list of operators that are supported for that timestamp, which was specified in the parameter field_name :param allow_timestamps: defines whether the timestamp-based constraint is applicable to this query or not :returns: True, if there was a timestamp constraint, containing a timestamp field named as defined in field_name, in the query and it was allowed and syntactically correct. :returns: False, if there wasn't timestamp constraint, containing a timestamp field named as defined in field_name, in the query :raises InvalidInput: if an operator is unsupported for a given timestamp field :raises UnknownArgument: if the timestamp constraint is not allowed in the query """ for item in query: if item.field == field_name: # If *timestamp* or *search_offset* field was specified in the # query, but timestamp is not supported on that resource, on # which the query was invoked, then raise an exception. 
if not allow_timestamps: raise wsme.exc.UnknownArgument(field_name, "not valid for " + "this resource") if item.op not in operator_list: raise wsme.exc.InvalidInput('op', item.op, 'unimplemented operator for %s' % item.field) return True return False def query_to_kwargs(query, db_func, internal_keys=None, allow_timestamps=True): validate_query(query, db_func, internal_keys=internal_keys, allow_timestamps=allow_timestamps) query = sanitize_query(query, db_func) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} stamp = {} metaquery = {} kwargs = {} for i in query: if i.field == 'timestamp': if i.op in ('lt', 'le'): stamp['end_timestamp'] = i.value stamp['end_timestamp_op'] = i.op elif i.op in ('gt', 'ge'): stamp['start_timestamp'] = i.value stamp['start_timestamp_op'] = i.op else: if i.op == 'eq': if i.field == 'search_offset': stamp['search_offset'] = i.value elif i.field == 'enabled': kwargs[i.field] = i._get_value_as_type('boolean') elif i.field.startswith('metadata.'): metaquery[i.field] = i._get_value_as_type() elif i.field.startswith('resource_metadata.'): metaquery[i.field[9:]] = i._get_value_as_type() else: key = translation.get(i.field, i.field) kwargs[key] = i.value if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]: kwargs['metaquery'] = metaquery if stamp: kwargs.update(_get_query_timestamps(stamp)) return kwargs def _get_query_timestamps(args=None): """Return any optional timestamp information in the request. Determine the desired range, if any, from the GET arguments. Set up the query range using the specified offset. [query_start ... start_timestamp ... end_timestamp ... query_end] Returns a dictionary containing: start_timestamp: First timestamp to use for query start_timestamp_op: First timestamp operator to use for query end_timestamp: Final timestamp to use for query end_timestamp_op: Final timestamp operator to use for query """ if args is None: return {} search_offset = int(args.get('search_offset', 0)) def _parse_timestamp(timestamp): if not timestamp: return None try: iso_timestamp = timeutils.parse_isotime(timestamp) iso_timestamp = iso_timestamp.replace(tzinfo=None) except ValueError: raise wsme.exc.InvalidInput('timestamp', timestamp, 'invalid timestamp format') return iso_timestamp start_timestamp = _parse_timestamp(args.get('start_timestamp')) end_timestamp = _parse_timestamp(args.get('end_timestamp')) start_timestamp = start_timestamp - datetime.timedelta( minutes=search_offset) if start_timestamp else None end_timestamp = end_timestamp + datetime.timedelta( minutes=search_offset) if end_timestamp else None return {'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp, 'start_timestamp_op': args.get('start_timestamp_op'), 'end_timestamp_op': args.get('end_timestamp_op')} def flatten_metadata(metadata): """Return flattened resource metadata. Metadata is returned with flattened nested structures (except nested sets) and with all values converted to unicode strings. """ if metadata: # After changing recursive_keypairs` output we need to keep # flattening output unchanged. 
# Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.') # output before: a.b:c=d # output now: a.b.c=d # So to keep the first variant just replace all dots except the first return dict((k.replace('.', ':').replace(':', '.', 1), six.text_type(v)) for k, v in utils.recursive_keypairs(metadata, separator='.') if type(v) is not set) return {} # TODO(fabiog): this decorator should disappear and have a more unified # way of controlling access and scope. Before messing with this, though, # I feel this file should be refactored into smaller chunks, one for each # controller (e.g. meters and so on ...). Right now its size is # overwhelming. def requires_admin(func): @functools.wraps(func) def wrapped(*args, **kwargs): usr_limit, proj_limit = rbac.get_limited_to(pecan.request.headers) # If User and Project are None, you have full access. if usr_limit and proj_limit: # since this decorator gets called out of wsme context, # raising an exception results in an internal error, so # call abort to handle the error ex = base.ProjectNotAuthorized(proj_limit) pecan.core.abort(status_code=ex.code, detail=ex.msg) return func(*args, **kwargs) return wrapped def requires_context(func): @functools.wraps(func) def wrapped(*args, **kwargs): req_usr = pecan.request.headers.get('X-User-Id') proj_usr = pecan.request.headers.get('X-Project-Id') if ((not req_usr) or (not proj_usr)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') return func(*args, **kwargs) return wrapped ceilometer-6.1.5/ceilometer/api/controllers/v2/capabilities.py0000664000567000056710000001045613072744705025634 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer import utils def _flatten_capabilities(capabilities): return dict((k, v) for k, v in utils.recursive_keypairs(capabilities)) class Capabilities(base.Base): """A representation of the API and storage capabilities. Usually constrained by restrictions imposed by the storage driver.
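The capability dictionaries are flattened with utils.recursive_keypairs(), which joins nested keys with ':'. For example, a nested structure like {'statistics': {'groupby': True}} is reported to API consumers as {'statistics:groupby': True} (see sample() below for a fuller illustrative payload).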
""" api = {wtypes.text: bool} "A flattened dictionary of API capabilities" storage = {wtypes.text: bool} "A flattened dictionary of storage capabilities" event_storage = {wtypes.text: bool} "A flattened dictionary of event storage capabilities" @classmethod def sample(cls): return cls( api=_flatten_capabilities({ 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True, 'quartile': False}}}, 'events': {'query': {'simple': True}}, }), storage=_flatten_capabilities( {'storage': {'production_ready': True}}), event_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) class CapabilitiesController(rest.RestController): """Manages capabilities queries.""" @wsme_pecan.wsexpose(Capabilities) def get(self): """Returns a flattened dictionary of API capabilities. Capabilities supported by the currently configured storage driver. """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers conn = pecan.request.storage_conn event_conn = pecan.request.event_storage_conn driver_capabilities = conn.get_capabilities().copy() driver_capabilities['events'] = event_conn.get_capabilities()['events'] driver_perf = conn.get_storage_capabilities() event_driver_perf = event_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), storage=_flatten_capabilities(driver_perf), event_storage=_flatten_capabilities( event_driver_perf)) ceilometer-6.1.5/ceilometer/api/controllers/v2/__init__.py0000664000567000056710000000000013072744703024720 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/controllers/__init__.py0000664000567000056710000000000013072744703024371 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/api/__init__.py0000664000567000056710000000234013072744705022036 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg # Register options for the service OPTS = [ cfg.PortOpt('port', default=8777, deprecated_name='metering_api_port', deprecated_group='DEFAULT', help='The port for the ceilometer API server.', ), cfg.StrOpt('host', default='0.0.0.0', help='The listen IP for the ceilometer API server.', ), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='api', title='Options for the ceilometer-api service') CONF.register_group(opt_group) CONF.register_opts(OPTS, opt_group) ceilometer-6.1.5/ceilometer/transformer/0000775000567000056710000000000013072745164021517 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/transformer/accumulator.py0000664000567000056710000000247413072744706024420 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import transformer class TransformerAccumulator(transformer.TransformerBase): """Transformer that accumulates samples until a threshold. And then flushes them out into the wild. """ grouping_keys = ['resource_id'] def __init__(self, size=1, **kwargs): if size >= 1: self.samples = [] self.size = size super(TransformerAccumulator, self).__init__(**kwargs) def handle_sample(self, context, sample): if self.size >= 1: self.samples.append(sample) else: return sample def flush(self, context): if len(self.samples) >= self.size: x = self.samples self.samples = [] return x return [] ceilometer-6.1.5/ceilometer/transformer/arithmetic.py0000664000567000056710000001355213072744706024231 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import keyword import math import re from oslo_log import log import six from ceilometer.i18n import _ from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class ArithmeticTransformer(transformer.TransformerBase): """Multi meter arithmetic transformer. Transformer that performs arithmetic operations over one or more meters and/or their metadata. 
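Meters are referenced in the expression as $(meter-name); a reference that is not followed by an attribute access defaults to the meter's volume. For example (an illustrative pipeline snippet; the meter and target names are only an example): transformers: - name: "arithmetic" parameters: target: name: "memory_util" unit: "%" type: "gauge" expr: "100 * $(memory.usage) / $(memory)" A result is only emitted once samples for every referenced meter have been received for the same resource_id.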
""" grouping_keys = ['resource_id'] meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)') def __init__(self, target=None, **kwargs): super(ArithmeticTransformer, self).__init__(**kwargs) target = target or {} self.target = target self.expr = target.get('expr', '') self.expr_escaped, self.escaped_names = self.parse_expr(self.expr) self.required_meters = list(self.escaped_names.values()) self.misconfigured = len(self.required_meters) == 0 if not self.misconfigured: self.reference_meter = self.required_meters[0] # convert to set for more efficient contains operation self.required_meters = set(self.required_meters) self.cache = collections.defaultdict(dict) self.latest_timestamp = None else: LOG.warning(_('Arithmetic transformer must use at least one' ' meter in expression \'%s\''), self.expr) def _update_cache(self, _sample): """Update the cache with the latest sample.""" escaped_name = self.escaped_names.get(_sample.name, '') if escaped_name not in self.required_meters: return self.cache[_sample.resource_id][escaped_name] = _sample def _check_requirements(self, resource_id): """Check if all the required meters are available in the cache.""" return len(self.cache[resource_id]) == len(self.required_meters) def _calculate(self, resource_id): """Evaluate the expression and return a new sample if successful.""" ns_dict = dict((m, s.as_dict()) for m, s in six.iteritems(self.cache[resource_id])) ns = transformer.Namespace(ns_dict) try: new_volume = eval(self.expr_escaped, {}, ns) if math.isnan(new_volume): raise ArithmeticError(_('Expression evaluated to ' 'a NaN value!')) reference_sample = self.cache[resource_id][self.reference_meter] return sample.Sample( name=self.target.get('name', reference_sample.name), unit=self.target.get('unit', reference_sample.unit), type=self.target.get('type', reference_sample.type), volume=float(new_volume), user_id=reference_sample.user_id, project_id=reference_sample.project_id, resource_id=reference_sample.resource_id, timestamp=self.latest_timestamp, resource_metadata=reference_sample.resource_metadata ) except Exception as e: LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'), {'expr': self.expr, 'exc': e}) def handle_sample(self, context, _sample): self._update_cache(_sample) self.latest_timestamp = _sample.timestamp def flush(self, context): new_samples = [] cache_clean_list = [] if not self.misconfigured: for resource_id in self.cache: if self._check_requirements(resource_id): new_samples.append(self._calculate(resource_id)) cache_clean_list.append(resource_id) for res_id in cache_clean_list: self.cache.pop(res_id) return new_samples @classmethod def parse_expr(cls, expr): """Transforms meter names in the expression into valid identifiers. :param expr: unescaped expression :return: A tuple of the escaped expression and a dict representing the translation of meter names into Python identifiers """ class Replacer(object): """Replaces matched meter names with escaped names. If the meter name is not followed by parameter access in the expression, it defaults to accessing the 'volume' parameter. """ def __init__(self, original_expr): self.original_expr = original_expr self.escaped_map = {} def __call__(self, match): meter_name = match.group(1) escaped_name = self.escape(meter_name) self.escaped_map[meter_name] = escaped_name if (match.end(0) == len(self.original_expr) or self.original_expr[match.end(0)] != '.'): escaped_name += '.volume' return escaped_name @staticmethod def escape(name): has_dot = '.' 
in name if has_dot: name = name.replace('.', '_') if has_dot or name.endswith('ESC') or name in keyword.kwlist: name = "_" + name + '_ESC' return name replacer = Replacer(expr) expr = re.sub(cls.meter_name_re, replacer, expr) return expr, replacer.escaped_map ceilometer-6.1.5/ceilometer/transformer/conversions.py0000664000567000056710000003143113072744706024444 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _, _LW from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class BaseConversionTransformer(transformer.TransformerBase): """Transformer to derive conversion.""" grouping_keys = ['resource_id'] def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ source = source or {} target = target or {} self.source = source self.target = target super(BaseConversionTransformer, self).__init__(**kwargs) def _map(self, s, attr): """Apply the name or unit mapping if configured.""" mapped = None from_ = self.source.get('map_from') to_ = self.target.get('map_to') if from_ and to_: if from_.get(attr) and to_.get(attr): try: mapped = re.sub(from_[attr], to_[attr], getattr(s, attr)) except Exception: pass return mapped or self.target.get(attr, getattr(s, attr)) class DeltaTransformer(BaseConversionTransformer): """Transformer based on the delta of a sample volume.""" def __init__(self, target=None, growth_only=False, **kwargs): """Initialize transformer with configured parameters. :param growth_only: capture only positive deltas """ super(DeltaTransformer, self).__init__(target=target, **kwargs) self.growth_only = growth_only self.cache = {} def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp) if prev: prev_volume = prev[0] prev_timestamp = prev[1] time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning(_LW('Dropping out of time order sample: %s'), (s,)) # Reset the cache to the newer sample. 
self.cache[key] = prev return None volume_delta = s.volume - prev_volume if self.growth_only and volume_delta < 0: LOG.warning(_LW('Negative delta detected, dropping value')) s = None else: s = self._convert(s, volume_delta) LOG.debug('Converted to: %s', s) else: LOG.warning(_LW('Dropping sample with no predecessor: %s'), (s,)) s = None return s def _convert(self, s, delta): """Transform the appropriate sample fields.""" return sample.Sample( name=self._map(s, 'name'), unit=s.unit, type=sample.TYPE_DELTA, volume=delta, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) class ScalingTransformer(BaseConversionTransformer): """Transformer to apply a scaling conversion.""" def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ super(ScalingTransformer, self).__init__(source=source, target=target, **kwargs) self.scale = self.target.get('scale') LOG.debug('scaling conversion transformer with source:' ' %(source)s target: %(target)s:', {'source': self.source, 'target': self.target}) def _scale(self, s): """Apply the scaling factor. Either a straight multiplicative factor or else a string to be eval'd. """ ns = transformer.Namespace(s.as_dict()) scale = self.scale return ((eval(scale, {}, ns) if isinstance(scale, six.string_types) else s.volume * scale) if scale else s.volume) def _convert(self, s, growth=1): """Transform the appropriate sample fields.""" return sample.Sample( name=self._map(s, 'name'), unit=self._map(s, 'unit'), type=self.target.get('type', s.type), volume=self._scale(s) * growth, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) if self.source.get('unit', s.unit) == s.unit: s = self._convert(s) LOG.debug('converted to: %s', s) return s class RateOfChangeTransformer(ScalingTransformer): """Transformer based on the rate of change of a sample volume. For example taking the current and previous volumes of a cumulative sample and producing a gauge value based on the proportion of some maximum used. """ def __init__(self, **kwargs): """Initialize transformer with configured parameters.""" super(RateOfChangeTransformer, self).__init__(**kwargs) self.cache = {} self.scale = self.scale or '1' def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp) if prev: prev_volume = prev[0] prev_timestamp = prev[1] time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning(_('dropping out of time order sample: %s'), (s,)) # Reset the cache to the newer sample. 
self.cache[key] = prev return None # we only allow negative volume deltas for noncumulative # samples, whereas for cumulative we assume that a reset has # occurred in the interim so that the current volume gives a # lower bound on growth volume_delta = (s.volume - prev_volume if (prev_volume <= s.volume or s.type != sample.TYPE_CUMULATIVE) else s.volume) rate_of_change = ((1.0 * volume_delta / time_delta) if time_delta else 0.0) s = self._convert(s, rate_of_change) LOG.debug('converted to: %s', s) else: LOG.warning(_('dropping sample with no predecessor: %s'), (s,)) s = None return s class AggregatorTransformer(ScalingTransformer): """Transformer that aggregates samples. Aggregation goes on until a size threshold and/or a retention_time is reached, and then the samples are flushed out into the wild. Example: To aggregate samples by resource_metadata and keep the resource_metadata of the latest received sample: AggregatorTransformer(retention_time=60, resource_metadata='last') To aggregate samples by user_id and resource_metadata, keep the user_id of the first received sample and drop the resource_metadata: AggregatorTransformer(size=15, user_id='first', resource_metadata='drop') To keep the timestamp of the last received sample rather than the first: AggregatorTransformer(timestamp="last") """ def __init__(self, size=1, retention_time=None, project_id=None, user_id=None, resource_metadata="last", timestamp="first", **kwargs): super(AggregatorTransformer, self).__init__(**kwargs) self.samples = {} self.counts = collections.defaultdict(int) self.size = int(size) if size else None self.retention_time = float(retention_time) if retention_time else None if not (self.size or self.retention_time): self.size = 1 if timestamp in ["first", "last"]: self.timestamp = timestamp else: self.timestamp = "first" self.initial_timestamp = None self.aggregated_samples = 0 self.key_attributes = [] self.merged_attribute_policy = {} self._init_attribute('project_id', project_id) self._init_attribute('user_id', user_id) self._init_attribute('resource_metadata', resource_metadata, is_droppable=True, mandatory=True) def _init_attribute(self, name, value, is_droppable=False, mandatory=False): drop = ['drop'] if is_droppable else [] if value or mandatory: if value not in ['last', 'first'] + drop: LOG.warning('%s is unknown (%s), using last' % (name, value)) value = 'last' self.merged_attribute_policy[name] = value else: self.key_attributes.append(name) def _get_unique_key(self, s): # NOTE(arezmerita): in samples generated by ceilometer middleware, # when accessing without authentication publicly readable/writable # swift containers, the project_id and the user_id are missing. # They will be replaced by '' for unique key construction.
keys = ['' if getattr(s, f) is None else getattr(s, f) for f in self.key_attributes] non_aggregated_keys = "-".join(keys) # NOTE(sileht): it assumes, a meter always have the same unit/type return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys) def handle_sample(self, context, sample_): if not self.initial_timestamp: self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp) self.aggregated_samples += 1 key = self._get_unique_key(sample_) self.counts[key] += 1 if key not in self.samples: self.samples[key] = self._convert(sample_) if self.merged_attribute_policy[ 'resource_metadata'] == 'drop': self.samples[key].resource_metadata = {} else: if self.timestamp == "last": self.samples[key].timestamp = sample_.timestamp if sample_.type == sample.TYPE_CUMULATIVE: self.samples[key].volume = self._scale(sample_) else: self.samples[key].volume += self._scale(sample_) for field in self.merged_attribute_policy: if self.merged_attribute_policy[field] == 'last': setattr(self.samples[key], field, getattr(sample_, field)) def flush(self, context): if not self.initial_timestamp: return [] expired = (self.retention_time and timeutils.is_older_than(self.initial_timestamp, self.retention_time)) full = self.size and self.aggregated_samples >= self.size if full or expired: x = list(self.samples.values()) # gauge aggregates need to be averages for s in x: if s.type == sample.TYPE_GAUGE: key = self._get_unique_key(s) s.volume /= self.counts[key] self.samples.clear() self.counts.clear() self.aggregated_samples = 0 self.initial_timestamp = None return x return [] ceilometer-6.1.5/ceilometer/transformer/__init__.py0000664000567000056710000000466513072744706023644 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import six @six.add_metaclass(abc.ABCMeta) class TransformerBase(object): """Base class for plugins that transform the sample.""" def __init__(self, **kwargs): """Setup transformer. Each time a transformed is involved in a pipeline, a new transformer instance is created and chained into the pipeline. i.e. transformer instance is per pipeline. This helps if transformer need keep some cache and per-pipeline information. :param kwargs: The parameters that are defined in pipeline config file. """ super(TransformerBase, self).__init__() @abc.abstractmethod def handle_sample(self, context, sample): """Transform a sample. :param context: Passed from the data collector. :param sample: A sample. """ @abc.abstractproperty def grouping_keys(self): """Keys used to group transformer.""" def flush(self, context): """Flush samples cached previously. :param context: Passed from the data collector. """ return [] class Namespace(object): """Encapsulates the namespace. Encapsulation is done by wrapping the evaluation of the configured rule. This allows nested dicts to be accessed in the attribute style, and missing attributes to yield false when used in a boolean expression. 
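Example (an illustrative sketch): ns = Namespace({'cpu': {'volume': 4}}) ns.cpu.volume # -> 4, nested dict accessed attribute-style bool(ns.missing) # -> False, missing attributes are falsy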
""" def __init__(self, seed): self.__dict__ = collections.defaultdict(lambda: Namespace({})) self.__dict__.update(seed) for k, v in six.iteritems(self.__dict__): if isinstance(v, dict): self.__dict__[k] = Namespace(v) def __getattr__(self, attr): return self.__dict__[attr] def __getitem__(self, key): return self.__dict__[key] def __nonzero__(self): return len(self.__dict__) > 0 __bool__ = __nonzero__ ceilometer-6.1.5/ceilometer/sample.py0000664000567000056710000000713713072744706021021 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Authors: Doug Hellmann # Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample class for holding data about a metering event. A Sample doesn't really do anything, but we need a way to ensure that all of the appropriate fields have been filled in by the plugins that create them. """ import copy import uuid from oslo_config import cfg OPTS = [ cfg.StrOpt('sample_source', default='openstack', help='Source for samples emitted on this instance.'), ] cfg.CONF.register_opts(OPTS) # Fields explanation: # # Source: the source of this sample # Name: the name of the meter, must be unique # Type: the type of the meter, must be either: # - cumulative: the value is incremented and never reset to 0 # - delta: the value is reset to 0 each time it is sent # - gauge: the value is an absolute value and is not a counter # Unit: the unit of the meter # Volume: the sample value # User ID: the user ID # Project ID: the project ID # Resource ID: the resource ID # Timestamp: when the sample has been read # Resource metadata: various metadata # id: an uuid of a sample, can be taken from API when post sample via API class Sample(object): def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source=None, id=None): self.name = name self.type = type self.unit = unit self.volume = volume self.user_id = user_id self.project_id = project_id self.resource_id = resource_id self.timestamp = timestamp self.resource_metadata = resource_metadata self.source = source or cfg.CONF.sample_source self.id = id or str(uuid.uuid1()) def as_dict(self): return copy.copy(self.__dict__) def __repr__(self): return '' % ( self.name, self.volume, self.resource_id, self.timestamp) @classmethod def from_notification(cls, name, type, volume, unit, user_id, project_id, resource_id, message, timestamp=None, metadata=None, source=None): if not metadata: metadata = (copy.copy(message['payload']) if isinstance(message['payload'], dict) else {}) metadata['event_type'] = message['event_type'] metadata['host'] = message['publisher_id'] ts = timestamp if timestamp else message['timestamp'] return cls(name=name, type=type, volume=volume, unit=unit, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=ts, resource_metadata=metadata, source=source) TYPE_GAUGE = 'gauge' TYPE_DELTA = 'delta' TYPE_CUMULATIVE = 'cumulative' TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) 
ceilometer-6.1.5/ceilometer/notification.py0000664000567000056710000003262013072744706022221 0ustar jenkinsjenkins00000000000000# # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import threading from oslo_config import cfg from oslo_context import context from oslo_log import log import oslo_messaging from stevedore import extension from ceilometer.agent import plugin_base as base from ceilometer import coordination from ceilometer.event import endpoint as event_endpoint from ceilometer import exchange_control from ceilometer.i18n import _, _LI, _LW from ceilometer import messaging from ceilometer import pipeline from ceilometer import service_base from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('pipeline_processing_queues', default=10, min=1, help='Number of queues to parallelize workload across. This ' 'value should be larger than the number of active ' 'notification agents for optimal results.'), cfg.BoolOpt('ack_on_event_error', default=True, deprecated_group='collector', help='Acknowledge message when event persistence fails.'), cfg.BoolOpt('store_events', deprecated_group='collector', default=False, help='Save event details.'), cfg.BoolOpt('disable_non_metric_meters', default=True, help='WARNING: Ceilometer historically offered the ability to ' 'store events as meters. This usage is NOT advised as it ' 'can flood the metering database and cause performance ' 'degradation.'), cfg.BoolOpt('workload_partitioning', default=False, help='Enable workload partitioning, allowing multiple ' 'notification agents to be run simultaneously.'), cfg.MultiStrOpt('messaging_urls', default=[], secret=True, help="Messaging URLs to listen for notifications. " "Example: transport://user:pass@host1:port" "[,hostN:portN]/virtual_host " "(DEFAULT/transport_url is used if empty)"), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'publishing them'), cfg.IntOpt('batch_timeout', default=None, help='Number of seconds to wait before publishing samples' 'when batch_size is not reached (None means indefinitely)'), ] cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS) cfg.CONF.register_opts(OPTS, group="notification") cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') class NotificationService(service_base.BaseService): """Notification service. When running multiple agents, additional queuing sequence is required for inter process communication. Each agent has two listeners: one to listen to the main OpenStack queue and another listener(and notifier) for IPC to divide pipeline sink endpoints. Coordination should be enabled to have proper active/active HA. 
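For example (an illustrative sketch; the pipeline name is made up), with pipeline_processing_queues = 2 and a pipeline named "meter_pipe", samples are fanned out over the IPC topics "ceilometer-pipe-meter_pipe-0" and "ceilometer-pipe-meter_pipe-1", and each notification agent consumes only the subset of those topics handed to it by the partition coordinator.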
""" NOTIFICATION_NAMESPACE = 'ceilometer.notification' NOTIFICATION_IPC = 'ceilometer-pipe' @classmethod def _get_notifications_manager(cls, pm): return extension.ExtensionManager( namespace=cls.NOTIFICATION_NAMESPACE, invoke_on_load=True, invoke_args=(pm, ) ) def _get_notifiers(self, transport, pipe): notifiers = [] for x in range(cfg.CONF.notification.pipeline_processing_queues): notifiers.append(oslo_messaging.Notifier( transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id=pipe.name, topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x))) return notifiers def _get_pipe_manager(self, transport, pipeline_manager): if cfg.CONF.notification.workload_partitioning: pipe_manager = pipeline.SamplePipelineTransportManager() for pipe in pipeline_manager.pipelines: key = pipeline.get_pipeline_grouping_key(pipe) pipe_manager.add_transporter( (pipe.source.support_meter, key or ['resource_id'], self._get_notifiers(transport, pipe))) else: pipe_manager = pipeline_manager return pipe_manager def _get_event_pipeline_manager(self, transport): if cfg.CONF.notification.store_events: if cfg.CONF.notification.workload_partitioning: event_pipe_manager = pipeline.EventPipelineTransportManager() for pipe in self.event_pipeline_manager.pipelines: event_pipe_manager.add_transporter( (pipe.source.support_event, ['event_type'], self._get_notifiers(transport, pipe))) else: event_pipe_manager = self.event_pipeline_manager return event_pipe_manager def start(self): super(NotificationService, self).start() self.partition_coordinator = None self.coord_lock = threading.Lock() self.listeners = [] # NOTE(kbespalov): for the pipeline queues used a single amqp host # hence only one listener is required self.pipeline_listener = None self.pipeline_manager = pipeline.setup_pipeline() if cfg.CONF.notification.store_events: self.event_pipeline_manager = pipeline.setup_event_pipeline() self.transport = messaging.get_transport() if cfg.CONF.notification.workload_partitioning: self.ctxt = context.get_admin_context() self.group_id = self.NOTIFICATION_NAMESPACE self.partition_coordinator = coordination.PartitionCoordinator() self.partition_coordinator.start() else: # FIXME(sileht): endpoint uses the notification_topics option # and it should not because this is an oslo_messaging option # not a ceilometer. Until we have something to get the # notification_topics in another way, we must create a transport # to ensure the option has been registered by oslo_messaging. messaging.get_notifier(self.transport, '') self.group_id = None self.pipe_manager = self._get_pipe_manager(self.transport, self.pipeline_manager) self.event_pipe_manager = self._get_event_pipeline_manager( self.transport) self._configure_main_queue_listeners(self.pipe_manager, self.event_pipe_manager) if cfg.CONF.notification.workload_partitioning: # join group after all manager set up is configured self.partition_coordinator.join_group(self.group_id) self.partition_coordinator.watch_group(self.group_id, self._refresh_agent) self.tg.add_timer(cfg.CONF.coordination.heartbeat, self.partition_coordinator.heartbeat) self.tg.add_timer(cfg.CONF.coordination.check_watchers, self.partition_coordinator.run_watchers) # configure pipelines after all coordination is configured. self._configure_pipeline_listener() if not cfg.CONF.notification.disable_non_metric_meters: LOG.warning(_LW('Non-metric meters may be collected. 
It is highly ' 'advisable to disable these meters using ' 'ceilometer.conf or the pipeline.yaml')) # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) self.init_pipeline_refresh() def _configure_main_queue_listeners(self, pipe_manager, event_pipe_manager): notification_manager = self._get_notifications_manager(pipe_manager) if not list(notification_manager): LOG.warning(_('Failed to load any notification handlers for %s'), self.NOTIFICATION_NAMESPACE) ack_on_error = cfg.CONF.notification.ack_on_event_error endpoints = [] if cfg.CONF.notification.store_events: endpoints.append( event_endpoint.EventsNotificationEndpoint(event_pipe_manager)) targets = [] for ext in notification_manager: handler = ext.obj if (cfg.CONF.notification.disable_non_metric_meters and isinstance(handler, base.NonMetricNotificationBase)): continue LOG.debug('Event types from %(name)s: %(type)s' ' (ack_on_error=%(error)s)', {'name': ext.name, 'type': ', '.join(handler.event_types), 'error': ack_on_error}) # NOTE(gordc): this could be a set check but oslo_messaging issue # https://bugs.launchpad.net/oslo.messaging/+bug/1398511 # This ensures we don't create multiple duplicate consumers. for new_tar in handler.get_targets(cfg.CONF): if new_tar not in targets: targets.append(new_tar) endpoints.append(handler) urls = cfg.CONF.notification.messaging_urls or [None] for url in urls: transport = messaging.get_transport(url) listener = messaging.get_batch_notification_listener( transport, targets, endpoints, batch_size=cfg.CONF.notification.batch_size, batch_timeout=cfg.CONF.notification.batch_timeout) listener.start() self.listeners.append(listener) def _refresh_agent(self, event): self._configure_pipeline_listener() def _configure_pipeline_listener(self): with self.coord_lock: ev_pipes = [] if cfg.CONF.notification.store_events: ev_pipes = self.event_pipeline_manager.pipelines pipelines = self.pipeline_manager.pipelines + ev_pipes transport = messaging.get_transport() partitioned = self.partition_coordinator.extract_my_subset( self.group_id, range(cfg.CONF.notification.pipeline_processing_queues)) endpoints = [] targets = [] for pipe in pipelines: if isinstance(pipe, pipeline.EventPipeline): endpoints.append(pipeline.EventPipelineEndpoint(self.ctxt, pipe)) else: endpoints.append(pipeline.SamplePipelineEndpoint(self.ctxt, pipe)) for pipe_set, pipe in itertools.product(partitioned, pipelines): LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name, pipe_set) topic = '%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, pipe_set) targets.append(oslo_messaging.Target(topic=topic)) if self.pipeline_listener: self.pipeline_listener.stop() self.pipeline_listener.wait() self.pipeline_listener = messaging.get_batch_notification_listener( transport, targets, endpoints, batch_size=cfg.CONF.notification.batch_size, batch_timeout=cfg.CONF.notification.batch_timeout) self.pipeline_listener.start() def stop(self): if getattr(self, 'partition_coordinator', None): self.partition_coordinator.stop() listeners = [] if getattr(self, 'listeners', None): listeners.extend(self.listeners) if getattr(self, 'pipeline_listener', None): listeners.append(self.pipeline_listener) utils.kill_listeners(listeners) super(NotificationService, self).stop() def reload_pipeline(self): LOG.info(_LI("Reloading notification agent and listeners.")) if self.pipeline_validated: self.pipe_manager = self._get_pipe_manager( self.transport, self.pipeline_manager) if self.event_pipeline_validated: self.event_pipe_manager = 
self._get_event_pipeline_manager( self.transport) # re-start the main queue listeners. utils.kill_listeners(self.listeners) self._configure_main_queue_listeners( self.pipe_manager, self.event_pipe_manager) # re-start the pipeline listeners if workload partitioning # is enabled. if cfg.CONF.notification.workload_partitioning: self._configure_pipeline_listener() ceilometer-6.1.5/ceilometer/energy/0000775000567000056710000000000013072745164020446 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/energy/kwapi.py0000664000567000056710000001034713072744705022140 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log import requests import six from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('kwapi', default='energy', help='Kwapi service type.'), ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') class KwapiClient(object): """Kwapi API client.""" def __init__(self, url, token=None): """Initializes client.""" self.url = url self.token = token def iter_probes(self): """Returns a list of dicts describing all probes.""" probes_url = self.url + '/probes/' headers = {} if self.token is not None: headers = {'X-Auth-Token': self.token} timeout = cfg.CONF.http_timeout request = requests.get(probes_url, headers=headers, timeout=timeout) message = request.json() probes = message['probes'] for key, value in six.iteritems(probes): probe_dict = value probe_dict['id'] = key yield probe_dict class _Base(plugin_base.PollsterBase): """Base class for the Kwapi pollster, derived from PollsterBase.""" @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.kwapi @staticmethod def get_kwapi_client(ksclient, endpoint): """Returns a KwapiClient configured with the proper url and token.""" return KwapiClient(endpoint, keystone_client.get_auth_token(ksclient)) CACHE_KEY_PROBE = 'kwapi.probes' def _iter_probes(self, ksclient, cache, endpoint): """Iterate over all probes.""" key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE) if key not in cache: cache[key] = self._get_probes(ksclient, endpoint) return iter(cache[key]) def _get_probes(self, ksclient, endpoint): try: client = self.get_kwapi_client(ksclient, endpoint) except exceptions.EndpointNotFound: LOG.debug("Kwapi endpoint not found") return [] return list(client.iter_probes()) class EnergyPollster(_Base): """Measures energy consumption.""" def get_samples(self, manager, cache, resources): """Returns all samples.""" for endpoint in resources: for probe in self._iter_probes(manager.keystone, cache, endpoint): yield sample.Sample( name='energy', type=sample.TYPE_CUMULATIVE, unit='kWh', volume=probe['kwh'], user_id=None, project_id=None, resource_id=probe['id'], timestamp=datetime.datetime.fromtimestamp( probe['timestamp']).isoformat(), resource_metadata={} ) class 
PowerPollster(_Base): """Measures power consumption.""" def get_samples(self, manager, cache, resources): """Returns all samples.""" for endpoint in resources: for probe in self._iter_probes(manager.keystone, cache, endpoint): yield sample.Sample( name='power', type=sample.TYPE_GAUGE, unit='W', volume=probe['w'], user_id=None, project_id=None, resource_id=probe['id'], timestamp=datetime.datetime.fromtimestamp( probe['timestamp']).isoformat(), resource_metadata={} ) ceilometer-6.1.5/ceilometer/energy/__init__.py0000664000567000056710000000000013072744705022545 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/version.py0000664000567000056710000000121113072744703021205 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('ceilometer') ceilometer-6.1.5/ceilometer/ipmi/0000775000567000056710000000000013072745164020113 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/pollsters/0000775000567000056710000000000013072745164022142 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/pollsters/node.py0000664000567000056710000001234113072744706023443 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer import sample CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', group='ipmi') LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): def setup_environment(self): super(_Base, self).setup_environment() self.nodemanager = node_manager.NodeManager() self.polling_failures = 0 # Do not load this extension if no NM support if self.nodemanager.nm_version == 0: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' def get_value(self, stats): """Get value from statistics.""" return node_manager._hex(stats["Current_value"]) @abc.abstractmethod def read_data(self, cache): """Return data sample for IPMI.""" def get_samples(self, manager, cache, resources): # Only one resource for Node Manager pollster try: stats = self.read_data(cache) except nmexcept.IPMIException: self.polling_failures += 1 LOG.warning(_('Polling %(name)s failed for %(cnt)s times!') % ({'name': self.NAME, 'cnt': self.polling_failures})) if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.NAME) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 metadata = { 'node': CONF.host } if stats: data = self.get_value(stats) yield sample.Sample( name=self.NAME, type=self.TYPE, unit=self.UNIT, volume=data, user_id=None, project_id=None, resource_id=CONF.host, timestamp=timeutils.utcnow().isoformat(), resource_metadata=metadata) class InletTemperaturePollster(_Base): # Note(ildikov): The new meter name should be # "hardware.ipmi.node.inlet_temperature". As currently there # is no meter deprecation support in the code, we should use the # old name in order to avoid confusion. 
NAME = "hardware.ipmi.node.temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_inlet_temperature() class OutletTemperaturePollster(_Base): NAME = "hardware.ipmi.node.outlet_temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_outlet_temperature() class PowerPollster(_Base): NAME = "hardware.ipmi.node.power" TYPE = sample.TYPE_GAUGE UNIT = "W" def read_data(self, cache): return self.nodemanager.read_power_all() class AirflowPollster(_Base): NAME = "hardware.ipmi.node.airflow" TYPE = sample.TYPE_GAUGE UNIT = "CFM" def read_data(self, cache): return self.nodemanager.read_airflow() class CUPSIndexPollster(_Base): NAME = "hardware.ipmi.node.cups" TYPE = sample.TYPE_GAUGE UNIT = "CUPS" def read_data(self, cache): return self.nodemanager.read_cups_index() def get_value(self, stats): return node_manager._hex(stats["CUPS_Index"]) class _CUPSUtilPollsterBase(_Base): CACHE_KEY_CUPS = 'CUPS' def read_data(self, cache): i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {}) if not i_cache: i_cache.update(self.nodemanager.read_cups_utilization()) return i_cache class CPUUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.cpu_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["CPU_Utilization"]) class MemUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.mem_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["Mem_Utilization"]) class IOUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.io_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["IO_Utilization"]) ceilometer-6.1.5/ceilometer/ipmi/pollsters/sensor.py0000664000567000056710000001010013072744706024016 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer.ipmi.notifications import ironic as parser from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmi_sensor from ceilometer import sample CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', group='ipmi') LOG = log.getLogger(__name__) class InvalidSensorData(ValueError): pass class SensorPollster(plugin_base.PollsterBase): METRIC = None def setup_environment(self): super(SensorPollster, self).setup_environment() self.ipmi = ipmi_sensor.IPMISensor() self.polling_failures = 0 # Do not load this extension if no IPMI support if not self.ipmi.ipmi_support: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' @staticmethod def _get_sensor_types(data, sensor_type): try: return (sensor_type_data for _, sensor_type_data in data[sensor_type].items()) except KeyError: return [] def get_samples(self, manager, cache, resources): # Only one resource for IPMI pollster try: stats = self.ipmi.read_sensor_any(self.METRIC) except ipmiexcept.IPMIException: self.polling_failures += 1 LOG.warning(_( 'Polling %(mtr)s sensor failed for %(cnt)s times!') % ({'mtr': self.METRIC, 'cnt': self.polling_failures})) if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 sensor_type_data = self._get_sensor_types(stats, self.METRIC) for sensor_data in sensor_type_data: # Continue if sensor_data is not parseable. try: sensor_reading = sensor_data['Sensor Reading'] sensor_id = sensor_data['Sensor ID'] except KeyError: continue if not parser.validate_reading(sensor_reading): continue try: volume, unit = parser.parse_reading(sensor_reading) except parser.InvalidSensorData: continue resource_id = '%(host)s-%(sensor-id)s' % { 'host': CONF.host, 'sensor-id': parser.transform_id(sensor_id) } metadata = { 'node': CONF.host } yield sample.Sample( name='hardware.ipmi.%s' % self.METRIC.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=metadata) class TemperatureSensorPollster(SensorPollster): METRIC = 'Temperature' class CurrentSensorPollster(SensorPollster): METRIC = 'Current' class FanSensorPollster(SensorPollster): METRIC = 'Fan' class VoltageSensorPollster(SensorPollster): METRIC = 'Voltage' ceilometer-6.1.5/ceilometer/ipmi/pollsters/__init__.py0000664000567000056710000000175013072744706024257 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
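# NOTE: a worked example of the retry accounting implemented by the pollsters # above, fully determined by the option defined below: with the default # polling_retry = 3, a pollster tolerates three consecutive read failures; on # the fourth consecutive failure (polling_failures > polling_retry) it raises # PollsterPermanentError and is disabled. Any successful read resets the # counter to zero, and a negative polling_retry removes the limit so the # pollster retries forever.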
"""Pollsters for IPMI and Intel Node Manager """ from oslo_config import cfg OPTS = [ cfg.IntOpt('polling_retry', default=3, help='Tolerance of IPMI/NM polling failures ' 'before disable this pollster. ' 'Negative indicates retrying forever.') ] cfg.CONF.register_opts(OPTS, group='ipmi') ceilometer-6.1.5/ceilometer/ipmi/platform/0000775000567000056710000000000013072745164021737 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/platform/ipmi_sensor.py0000664000567000056710000000765613072744703024654 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """IPMI sensor to collect various sensor data of compute node""" from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmitool IPMICMD = {"sdr_dump": "sdr dump", "sdr_info": "sdr info", "sensor_dump": "sdr -v", "sensor_dump_temperature": "sdr -v type Temperature", "sensor_dump_current": "sdr -v type Current", "sensor_dump_fan": "sdr -v type Fan", "sensor_dump_voltage": "sdr -v type Voltage"} # Requires translation of output into dict DICT_TRANSLATE_TEMPLATE = {"translate": 1} class IPMISensor(object): """The python implementation of IPMI sensor using ipmitool The class implements the IPMI sensor to get various sensor data of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. 
""" _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super(IPMISensor, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): self.ipmi_support = False self._inited = True self.ipmi_support = self.check_ipmi() @ipmitool.execute_ipmi_cmd() def _get_sdr_info(self): """Get the SDR info.""" return IPMICMD['sdr_info'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_all(self): """Get the sensor data for type.""" return IPMICMD['sensor_dump'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_temperature(self): """Get the sensor data for Temperature.""" return IPMICMD['sensor_dump_temperature'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_voltage(self): """Get the sensor data for Voltage.""" return IPMICMD['sensor_dump_voltage'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_current(self): """Get the sensor data for Current.""" return IPMICMD['sensor_dump_current'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_fan(self): """Get the sensor data for Fan.""" return IPMICMD['sensor_dump_fan'] def read_sensor_any(self, sensor_type=''): """Get the sensor data for type.""" if not self.ipmi_support: return {} mapping = {'': self._read_sensor_all, 'Temperature': self._read_sensor_temperature, 'Fan': self._read_sensor_fan, 'Voltage': self._read_sensor_voltage, 'Current': self._read_sensor_current} try: return mapping[sensor_type]() except KeyError: raise ipmiexcept.IPMIException(_('Wrong sensor type')) def check_ipmi(self): """IPMI capability checking This function is used to detect if compute node is IPMI capable platform. Just run a simple IPMI command to get SDR info for check. """ try: self._get_sdr_info() except ipmiexcept.IPMIException: return False return True ceilometer-6.1.5/ceilometer/ipmi/platform/ipmitool.py0000664000567000056710000001061213072744703024143 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils to run ipmitool for data collection""" from oslo_concurrency import processutils from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer import utils # Following 2 functions are copied from ironic project to handle ipmitool's # sensor data output. Need code clean and sharing in future. 
# Check ironic/drivers/modules/ipmitool.py def _get_sensor_type(sensor_data_dict): # There are only three sensor type name IDs: 'Sensor Type (Analog)' # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)' for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)', 'Sensor Type (Threshold)'): try: return sensor_data_dict[key].split(' ', 1)[0] except KeyError: continue raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, " "unknown sensor type")) def _process_sensor(sensor_data): sensor_data_fields = sensor_data.split('\n') sensor_data_dict = {} for field in sensor_data_fields: if not field: continue kv_value = field.split(':') if len(kv_value) != 2: continue sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip() return sensor_data_dict def _translate_output(output): """Translate the return value into a nested dict :param output: output of the execution of the IPMI command (sensor reading) """ sensors_data_dict = {} sensors_data_array = output.split('\n\n') for sensor_data in sensors_data_array: sensor_data_dict = _process_sensor(sensor_data) if not sensor_data_dict: continue sensor_type = _get_sensor_type(sensor_data_dict) # ignore the sensors which have no current 'Sensor Reading' data sensor_id = sensor_data_dict['Sensor ID'] if 'Sensor Reading' in sensor_data_dict: sensors_data_dict.setdefault(sensor_type, {})[sensor_id] = sensor_data_dict # get nothing, no valid sensor data if not sensors_data_dict: raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, " "no data retrieved from given input")) return sensors_data_dict def _parse_output(output, template): """Parse the return value of the IPMI command into a dict :param output: output of the execution of the IPMI command :param template: a dict that contains the expected items of the IPMI command and the length of each item. """ ret = {} index = 0 if not (output and template): return ret if "translate" in template: ret = _translate_output(output) else: output_list = output.strip().replace('\n', '').split(' ') if sum(template.values()) != len(output_list): raise ipmiexcept.IPMIException(_("ipmitool output " "length mismatch")) for item in template.items(): index_end = index + item[1] update_value = output_list[index: index_end] ret[item[0]] = update_value index = index_end return ret def execute_ipmi_cmd(template=None): """Decorator for the execution of an IPMI command. It parses the output of the IPMI command into a dictionary. """ template = template or {} def _execute_ipmi_cmd(f): def _execute(self, **kwargs): args = ['ipmitool'] command = f(self, **kwargs) args.extend(command.split(" ")) try: (out, __) = utils.execute(*args, run_as_root=True) except processutils.ProcessExecutionError: raise ipmiexcept.IPMIException(_("running ipmitool failed")) return _parse_output(out, template) return _execute return _execute_ipmi_cmd ceilometer-6.1.5/ceilometer/ipmi/platform/intel_node_manager.py0000664000567000056710000003237713072744706026130 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
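# NOTE: a minimal worked example (hypothetical byte values) of how the # OrderedDict templates defined below cooperate with # ceilometer.ipmi.platform.ipmitool._parse_output(). Given the raw ipmitool # output '57 01 00 dc 00' and the template # collections.OrderedDict([('Manufacturer_ID', 3), ('Current_value', 2)]), the # space-separated tokens are sliced in template order into # {'Manufacturer_ID': ['57', '01', '00'], 'Current_value': ['dc', '00']}. # Each value is a little-endian list of hex byte strings, so # _hex(['dc', '00']) == int('00dc', 16) == 220.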
"""Node manager engine to collect power and temperature of compute node. Intel Node Manager Technology enables the datacenter IT to monitor and control actual server power, thermal and compute utlization behavior through industry defined standard IPMI. This file provides Node Manager engine to get simple system power and temperature data based on ipmitool. """ import binascii import collections import tempfile import time from oslo_config import cfg import six from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import ipmitool OPTS = [ cfg.IntOpt('node_manager_init_retry', default=3, help='Number of retries upon Intel Node ' 'Manager initialization failure') ] CONF = cfg.CONF CONF.register_opts(OPTS, group='ipmi') IPMICMD = {"sdr_dump": "sdr dump", "sdr_info": "sdr info", "sensor_dump": "sdr -v"} IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01", "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00", "init_sensor_agent": "raw 0x0a 0x2c 0x01", "init_complete": "raw 0x0a 0x2c 0x00", "init_sensor_agent_status": "raw 0x0a 0x2c 0x00", "read_power_all": "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00", "read_inlet_temperature": "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00", "read_outlet_temperature": "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00", "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00", "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05", "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"} MANUFACTURER_ID_INTEL = ['57', '01', '00'] INTEL_PREFIX = '5701000d01' # The template dict are made according to the spec. It contains the expected # length of each item. And it can be used to parse the output of IPMI command. ONE_RETURN_TEMPLATE = {"ret": 1} BMC_INFO_TEMPLATE = collections.OrderedDict() BMC_INFO_TEMPLATE['Device_ID'] = 1 BMC_INFO_TEMPLATE['Device_Revision'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1 BMC_INFO_TEMPLATE['IPMI_Version'] = 1 BMC_INFO_TEMPLATE['Additional_Device_support'] = 1 BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3 BMC_INFO_TEMPLATE['Product_ID'] = 2 BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4 NM_STATISTICS_TEMPLATE = collections.OrderedDict() NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3 NM_STATISTICS_TEMPLATE['Current_value'] = 2 NM_STATISTICS_TEMPLATE['Minimum_value'] = 2 NM_STATISTICS_TEMPLATE['Maximum_value'] = 2 NM_STATISTICS_TEMPLATE['Average_value'] = 2 NM_STATISTICS_TEMPLATE['Time_stamp'] = 4 NM_STATISTICS_TEMPLATE['Report_period'] = 4 NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1 NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict() NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1 NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Additinal_Device_support'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1 NM_GET_VERSION_TEMPLATE = collections.OrderedDict() NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_VERSION_TEMPLATE['NM_Version'] = 1 NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1 
NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1 NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict() NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8 NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict() NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2 def _hex(list=None): """Format the return value in list into hex.""" list = list or [] if list: list.reverse() return int(''.join(list), 16) return 0 class NodeManager(object): """The python implementation of Intel Node Manager engine using ipmitool The class implements the engine to read power and temperature of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. """ _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super(NodeManager, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): # As singleton, only the 1st NM pollster would trigger its # initialization. nm_version indicate init result, and is shared # across all pollsters self._inited = True self.nm_version = 0 self.channel_slave = '' self.nm_version = self.check_node_manager() @staticmethod def _parse_slave_and_channel(file_path): """Parse the dumped file to get slave address and channel number. :param file_path: file path of dumped SDR file. :return: slave address and channel number of target device. """ ret = None prefix = INTEL_PREFIX # According to Intel Node Manager spec, section 4.5, for Intel NM # discovery OEM SDR records are type C0h. It contains manufacture ID # and OEM data in the record body. # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6 # is Intel NM device slave address and channel number/sensor owner LUN. with open(file_path, 'rb') as bin_fp: for line in bin_fp.readlines(): if line: data_str = binascii.hexlify(line) if six.PY3: data_str = data_str.decode('ascii') if prefix in data_str: oem_id_index = data_str.index(prefix) ret = data_str[oem_id_index + len(prefix): oem_id_index + len(prefix) + 4] # Byte 5 is slave address. [7:4] from byte 6 is channel # number, so just pick ret[2] here. ret = (ret[0:2], ret[2]) break return ret @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE) def get_device_id(self): """IPMI command GET_DEVICE_ID.""" return IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent(self): """Run initialization agent.""" return IPMIRAWCMD["init_sensor_agent"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent_process(self): """Check the status of initialization agent.""" return IPMIRAWCMD["init_sensor_agent_status"] @ipmitool.execute_ipmi_cmd() def _dump_sdr_file(self, data_file=""): """Dump SDR into a file.""" return IPMICMD["sdr_dump"] + " " + data_file @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE) def _node_manager_get_device_id(self): """GET_DEVICE_ID command in Intel Node Manager Different from IPMI command GET_DEVICE_ID, it contains more information of Intel Node Manager. 
""" return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE) def _node_manager_get_version(self): """GET_NODE_MANAGER_VERSION command in Intel Node Manager Byte 4 of the response: 01h - Intel NM 1.0 02h - Intel NM 1.5 03h - Intel NM 2.0 04h - Intel NM 2.5 05h - Intel NM 3.0 """ return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_power_all(self): """Get the power consumption of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_inlet_temperature(self): """Get the inlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_outlet_temperature(self): """Get the outlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_airflow(self): """Get the volumetric airflow of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow'] @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE) def _read_cups_utilization(self): """Get the average CUPS utilization of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization'] @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE) def _read_cups_index(self): """Get the CUPS Index of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index'] def read_power_all(self): return self._read_power_all() if self.nm_version > 0 else {} def read_inlet_temperature(self): return self._read_inlet_temperature() if self.nm_version > 0 else {} def read_outlet_temperature(self): return self._read_outlet_temperature() if self.nm_version >= 5 else {} def read_airflow(self): # only available after NM 3.0 return self._read_airflow() if self.nm_version >= 5 else {} def read_cups_utilization(self): # only available after NM 3.0 return self._read_cups_utilization() if self.nm_version >= 5 else {} def read_cups_index(self): # only available after NM 3.0 return self._read_cups_index() if self.nm_version >= 5 else {} def init_node_manager(self): if self._init_sensor_agent_process()['ret'] == ['01']: return # Run sensor initialization agent for i in range(CONF.ipmi.node_manager_init_retry): self._init_sensor_agent() time.sleep(1) if self._init_sensor_agent_process()['ret'] == ['01']: return raise nmexcept.NodeManagerException(_('Node Manager init failed')) def discover_slave_channel(self): """Discover target slave address and channel number.""" file_path = tempfile.mkstemp()[1] self._dump_sdr_file(data_file=file_path) ret = self._parse_slave_and_channel(file_path) slave_address = ''.join(['0x', ret[0]]) channel = ''.join(['0x', ret[1]]) # String of channel and slave_address self.channel_slave = '-b ' + channel + ' -t ' + slave_address def node_manager_version(self): """Intel Node Manager capability checking This function is used to detect if compute node support Intel Node Manager(return version number) or not(return -1) and parse out the slave address and channel number of node manager. """ self.manufacturer_id = self.get_device_id()['Manufacturer_ID'] if MANUFACTURER_ID_INTEL != self.manufacturer_id: # If the manufacturer is not Intel, just set False and return. 
return 0 self.discover_slave_channel() support = self._node_manager_get_device_id()['Implemented_firmware'] # According to Intel Node Manager spec, return value of GET_DEVICE_ID, # bits 3 to 0 shows if Intel NM implemented or not. if int(support[0], 16) & 0xf == 0: return 0 return _hex(self._node_manager_get_version()['NM_Version']) def check_node_manager(self): """Intel Node Manager init and check This function is used to initialize Intel Node Manager and check the capability without throwing exception. It's safe to call it on non-NodeManager platform. """ try: self.init_node_manager() nm_version = self.node_manager_version() except (nmexcept.NodeManagerException, nmexcept.IPMIException): return 0 return nm_version ceilometer-6.1.5/ceilometer/ipmi/platform/exception.py0000664000567000056710000000132613072744703024307 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NodeManagerException(Exception): pass class IPMIException(Exception): pass ceilometer-6.1.5/ceilometer/ipmi/platform/__init__.py0000664000567000056710000000000013072744703024034 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/notifications/0000775000567000056710000000000013072745164022764 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/notifications/ironic.py0000664000567000056710000001307713072744706024632 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing hardware sensor data sample messages from notification events. 
""" from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from ceilometer.agent import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('ironic_exchange', default='ironic', help='Exchange name for Ironic notifications.'), ] cfg.CONF.register_opts(OPTS) # Map unit name to SI UNIT_MAP = { 'Watts': 'W', 'Volts': 'V', } def validate_reading(data): """Some sensors read "Disabled".""" return data != 'Disabled' def transform_id(data): return data.lower().replace(' ', '_') def parse_reading(data): try: volume, unit = data.split(' ', 1) unit = unit.rsplit(' ', 1)[-1] return float(volume), UNIT_MAP.get(unit, unit) except ValueError: raise InvalidSensorData('unable to parse sensor reading: %s' % data) class InvalidSensorData(ValueError): pass class SensorNotification(plugin_base.NotificationBase): """A generic class for extracting samples from sensor data notifications. A notification message can contain multiple samples from multiple sensors, all with the same basic structure: the volume for the sample is found as part of the value of a 'Sensor Reading' key. The unit is in the same value. Subclasses exist solely to allow flexibility with stevedore configuration. """ event_types = ['hardware.ipmi.*'] metric = None def get_targets(self, conf): """oslo.messaging.TargetS for this plugin.""" return [messaging.Target(topic=topic, exchange=conf.ironic_exchange) for topic in self.get_notification_topics(conf)] def _get_sample(self, message): try: return (payload for _, payload in message['payload'][self.metric].items()) except KeyError: return [] @staticmethod def _package_payload(message, payload): # NOTE(chdent): How much of the payload should we keep? payload['node'] = message['payload']['node_uuid'] info = {'publisher_id': message['publisher_id'], 'timestamp': message['payload']['timestamp'], 'event_type': message['payload']['event_type'], 'user_id': message['payload'].get('user_id'), 'project_id': message['payload'].get('project_id'), 'payload': payload} return info def process_notification(self, message): """Read and process a notification. The guts of a message are in dict value of a 'payload' key which then itself has a payload key containing a dict of multiple sensor readings. If expected keys in the payload are missing or values are not in the expected form for transformations, KeyError and ValueError are caught and the current sensor payload is skipped. """ payloads = self._get_sample(message['payload']) for payload in payloads: try: # Provide a fallback resource_id in case parts are missing. 
resource_id = 'missing id' try: resource_id = '%(nodeid)s-%(sensorid)s' % { 'nodeid': message['payload']['node_uuid'], 'sensorid': transform_id(payload['Sensor ID']) } except KeyError as exc: raise InvalidSensorData('missing key in payload: %s' % exc) info = self._package_payload(message, payload) try: sensor_reading = info['payload']['Sensor Reading'] except KeyError as exc: raise InvalidSensorData( "missing 'Sensor Reading' in payload" ) if validate_reading(sensor_reading): volume, unit = parse_reading(sensor_reading) yield sample.Sample.from_notification( name='hardware.ipmi.%s' % self.metric.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, resource_id=resource_id, message=info, user_id=info['user_id'], project_id=info['project_id']) except InvalidSensorData as exc: LOG.warning( 'invalid sensor data for %(resource)s: %(error)s' % dict(resource=resource_id, error=exc) ) continue class TemperatureSensorNotification(SensorNotification): metric = 'Temperature' class CurrentSensorNotification(SensorNotification): metric = 'Current' class FanSensorNotification(SensorNotification): metric = 'Fan' class VoltageSensorNotification(SensorNotification): metric = 'Voltage' ceilometer-6.1.5/ceilometer/ipmi/notifications/__init__.py0000664000567000056710000000000013072744703025061 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/ipmi/__init__.py0000664000567000056710000000000013072744703022210 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/messaging.py0000664000567000056710000000736413072744706021517 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
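# NOTE: a worked example (hypothetical readings) for the sensor parsing # helpers in ceilometer/ipmi/notifications/ironic.py above: # validate_reading('Disabled') -> False, so the sample is skipped; # parse_reading('25 (+/- 0) degrees C') -> (25.0, 'C'); # parse_reading('230 (+/- 0) Volts') -> (230.0, 'V') via UNIT_MAP; # transform_id('Fan 1 Tach (0x41)') -> 'fan_1_tach_(0x41)'.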
from oslo_config import cfg import oslo_context.context import oslo_messaging from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} def setup(): oslo_messaging.set_transport_defaults('ceilometer') def get_transport(url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = oslo_messaging.get_transport(cfg.CONF, url) except (oslo_messaging.InvalidTransportURL, oslo_messaging.DriverLoadFailure): if not optional or url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def cleanup(): """Cleanup the oslo_messaging layer.""" global TRANSPORTS, NOTIFIERS NOTIFIERS = {} # iterate over a copy of the keys, since entries are deleted while cleaning up for url in list(TRANSPORTS): TRANSPORTS[url].cleanup() del TRANSPORTS[url] class RequestContextSerializer(oslo_messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return oslo_context.context.RequestContext.from_dict(context) _SERIALIZER = RequestContextSerializer( oslo_serializer.JsonPayloadSerializer()) def get_batch_notification_listener(transport, targets, endpoints, allow_requeue=False, batch_size=1, batch_timeout=None): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_batch_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue, batch_size=batch_size, batch_timeout=batch_timeout) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) def convert_to_old_notification_format(priority, notification): # FIXME(sileht): temporary convert notification to old format # to focus on oslo_messaging migration before refactoring the code to # use the new oslo_messaging facilities notification = notification.copy() notification['priority'] = priority notification.update(notification["metadata"]) for k in notification['ctxt']: notification['_context_' + k] = notification['ctxt'][k] del notification['ctxt'] del notification['metadata'] return notification ceilometer-6.1.5/ceilometer/hacking/0000775000567000056710000000000013072745164020561 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hacking/checks.py0000664000567000056710000000330413072744703022371 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
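# NOTE: the functions below are ordinary hacking/flake8 local checks that are # registered through the factory() hook at the bottom of this module (in the # usual setup this is wired up via a [hacking] local-check-factory entry in # tox.ini; treat that wiring as an assumption here). Hypothetical offending # lines and the codes they would trigger: # LOG.warn('disk almost full') -> C301: use LOG.warning() instead # out = os.popen('uptime') -> C302: use the subprocess module instead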
""" Guidelines for writing new hacking checks - Use only for Ceilometer specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range X3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file """ def no_log_warn(logical_line): """Disallow 'LOG.warn(' https://bugs.launchpad.net/tempest/+bug/1508442 C301 """ if logical_line.startswith('LOG.warn('): yield(0, 'C301 Use LOG.warning() rather than LOG.warn()') def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 C302 """ if 'os.popen(' in logical_line: yield(0, 'C302 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') def factory(register): register(no_log_warn) register(no_os_popen) ceilometer-6.1.5/ceilometer/hacking/__init__.py0000664000567000056710000000000013072744703022656 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/agent/0000775000567000056710000000000013072745164020253 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/agent/discovery/0000775000567000056710000000000013072745164022262 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/agent/discovery/localnode.py0000664000567000056710000000142313072744703024572 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.agent import plugin_base class LocalNodeDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): """Return local node as resource.""" return ['local_host'] ceilometer-6.1.5/ceilometer/agent/discovery/tenant.py0000664000567000056710000000223313072744705024125 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ceilometer.agent import plugin_base as plugin cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class TenantDiscovery(plugin.DiscoveryBase): """Discovery that supplies keystone tenants. This discovery should be used when the pollster's work can't be divided into smaller pieces than per-tenants. Example of this is the Swift pollster, which polls account details and does so per-project. 
""" def discover(self, manager, param=None): tenants = manager.keystone.projects.list() return tenants or [] ceilometer-6.1.5/ceilometer/agent/discovery/endpoint.py0000664000567000056710000000341713072744705024461 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from ceilometer.agent import plugin_base as plugin from ceilometer.i18n import _LW from ceilometer import keystone_client LOG = log.getLogger(__name__) cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class EndpointDiscovery(plugin.DiscoveryBase): """Discovery that supplies service endpoints. This discovery should be used when the relevant APIs are not well suited to dividing the pollster's work into smaller pieces than a whole service at once. Example of this is the floating_ip pollster which calls nova.floating_ips.list() and therefore gets all floating IPs at once. """ @staticmethod def discover(manager, param=None): endpoints = keystone_client.get_service_catalog( manager.keystone).get_urls( service_type=param, interface=cfg.CONF.service_credentials.interface, region_name=cfg.CONF.service_credentials.region_name) if not endpoints: LOG.warning(_LW('No endpoints found for service %s'), "" if param is None else param) return [] return endpoints ceilometer-6.1.5/ceilometer/agent/discovery/__init__.py0000664000567000056710000000000013072744703024357 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/agent/manager.py0000664000567000056710000004776513072744705022262 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Julien Danjou # Copyright 2014 Red Hat, Inc # # Authors: Julien Danjou # Eoghan Glynn # Nejc Saje # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import itertools import random from keystoneauth1 import exceptions as ka_exceptions from keystoneclient import exceptions as ks_exceptions from oslo_config import cfg from oslo_context import context from oslo_log import log import oslo_messaging from six import moves from six.moves.urllib import parse as urlparse from stevedore import extension from ceilometer.agent import plugin_base from ceilometer import coordination from ceilometer.i18n import _, _LE, _LI, _LW from ceilometer import keystone_client from ceilometer import messaging from ceilometer import pipeline from ceilometer.publisher import utils as publisher_utils from ceilometer import service_base from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.BoolOpt('batch_polled_samples', default=True, help='To reduce polling agent load, samples are sent to the ' 'notification agent in a batch. To gain higher ' 'throughput at the cost of load set this to False.'), cfg.IntOpt('shuffle_time_before_polling_task', default=0, help='To reduce large requests at same time to Nova or other ' 'components from different compute agents, shuffle ' 'start time of polling task.'), ] POLLING_OPTS = [ cfg.StrOpt('partitioning_group_prefix', deprecated_group='central', help='Work-load partitioning group prefix. Use only if you ' 'want to run multiple polling agents with different ' 'config files. For each sub-group of the agent ' 'pool with the same partitioning_group_prefix a disjoint ' 'subset of pollsters should be loaded.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(POLLING_OPTS, group='polling') cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_group('service_types', 'ceilometer.energy.kwapi') cfg.CONF.import_group('service_types', 'ceilometer.image.glance') cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') cfg.CONF.import_group('service_types', 'ceilometer.nova_client') cfg.CONF.import_group('service_types', 'ceilometer.objectstore.rgw') cfg.CONF.import_group('service_types', 'ceilometer.objectstore.swift') class PollsterListForbidden(Exception): def __init__(self): msg = ('It is forbidden to use pollster-list option of polling agent ' 'in case of using coordination between multiple agents. Please ' 'use either multiple agents being coordinated or polling list ' 'option for one polling agent.') super(PollsterListForbidden, self).__init__(msg) class Resources(object): def __init__(self, agent_manager): self.agent_manager = agent_manager self._resources = [] self._discovery = [] self.blacklist = [] self.last_dup = [] def setup(self, source): self._resources = source.resources self._discovery = source.discovery def get(self, discovery_cache=None): source_discovery = (self.agent_manager.discover(self._discovery, discovery_cache) if self._discovery else []) static_resources = [] if self._resources: static_resources_group = self.agent_manager.construct_group_id( utils.hash_of_set(self._resources)) p_coord = self.agent_manager.partition_coordinator static_resources = p_coord.extract_my_subset( static_resources_group, self._resources) return static_resources + source_discovery @staticmethod def key(source_name, pollster): return '%s-%s' % (source_name, pollster.name) class PollingTask(object): """Polling task for polling samples and notifying. A polling task can be invoked periodically or only once. 
""" def __init__(self, agent_manager): self.manager = agent_manager # elements of the Cartesian product of sources X pollsters # with a common interval self.pollster_matches = collections.defaultdict(set) # we relate the static resources and per-source discovery to # each combination of pollster and matching source resource_factory = lambda: Resources(agent_manager) self.resources = collections.defaultdict(resource_factory) self._batch = cfg.CONF.batch_polled_samples self._telemetry_secret = cfg.CONF.publisher.telemetry_secret def add(self, pollster, source): self.pollster_matches[source.name].add(pollster) key = Resources.key(source.name, pollster) self.resources[key].setup(source) def poll_and_notify(self): """Polling sample and notify.""" cache = {} discovery_cache = {} poll_history = {} for source_name in self.pollster_matches: for pollster in self.pollster_matches[source_name]: key = Resources.key(source_name, pollster) candidate_res = list( self.resources[key].get(discovery_cache)) if not candidate_res and pollster.obj.default_discovery: candidate_res = self.manager.discover( [pollster.obj.default_discovery], discovery_cache) # Remove duplicated resources and black resources. Using # set() requires well defined __hash__ for each resource. # Since __eq__ is defined, 'not in' is safe here. polling_resources = [] black_res = self.resources[key].blacklist history = poll_history.get(pollster.name, []) for x in candidate_res: if x not in history: history.append(x) if x not in black_res: polling_resources.append(x) poll_history[pollster.name] = history # If no resources, skip for this pollster if not polling_resources: p_context = 'new ' if history else '' LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s" "resources found this cycle"), {'name': pollster.name, 'p_context': p_context}) continue LOG.info(_LI("Polling pollster %(poll)s in the context of " "%(src)s"), dict(poll=pollster.name, src=source_name)) try: samples = pollster.obj.get_samples( manager=self.manager, cache=cache, resources=polling_resources ) sample_batch = [] for sample in samples: sample_dict = ( publisher_utils.meter_message_from_counter( sample, self._telemetry_secret )) if self._batch: sample_batch.append(sample_dict) else: self._send_notification([sample_dict]) if sample_batch: self._send_notification(sample_batch) except plugin_base.PollsterPermanentError as err: LOG.error(_( 'Prevent pollster %(name)s for ' 'polling source %(source)s anymore!') % ({'name': pollster.name, 'source': source_name})) self.resources[key].blacklist.extend(err.fail_res_list) except Exception as err: LOG.warning(_( 'Continue after error from %(name)s: %(error)s') % ({'name': pollster.name, 'error': err}), exc_info=True) def _send_notification(self, samples): self.manager.notifier.sample( self.manager.context.to_dict(), 'telemetry.polling', {'samples': samples} ) class AgentManager(service_base.BaseService): def __init__(self, namespaces=None, pollster_list=None): namespaces = namespaces or ['compute', 'central'] pollster_list = pollster_list or [] group_prefix = cfg.CONF.polling.partitioning_group_prefix # features of using coordination and pollster-list are exclusive, and # cannot be used at one moment to avoid both samples duplication and # samples being lost if pollster_list and cfg.CONF.coordination.backend_url: raise PollsterListForbidden() super(AgentManager, self).__init__() def _match(pollster): """Find out if pollster name matches to one of the list.""" return any(utils.match(pollster.name, pattern) for pattern in 
pollster_list) if type(namespaces) is not list: namespaces = [namespaces] # we'll have default ['compute', 'central'] here if no namespaces will # be passed extensions = (self._extensions('poll', namespace).extensions for namespace in namespaces) # get the extensions from pollster builder extensions_fb = (self._extensions_from_builder('poll', namespace) for namespace in namespaces) if pollster_list: extensions = (moves.filter(_match, exts) for exts in extensions) extensions_fb = (moves.filter(_match, exts) for exts in extensions_fb) self.extensions = list(itertools.chain(*list(extensions))) + list( itertools.chain(*list(extensions_fb))) self.discovery_manager = self._extensions('discover') self.context = context.RequestContext('admin', 'admin', is_admin=True) self.partition_coordinator = coordination.PartitionCoordinator() # Compose coordination group prefix. # We'll use namespaces as the basement for this partitioning. namespace_prefix = '-'.join(sorted(namespaces)) self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix) if group_prefix else namespace_prefix) self.notifier = oslo_messaging.Notifier( messaging.get_transport(), driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id="ceilometer.polling") self._keystone = None self._keystone_last_exception = None @staticmethod def _get_ext_mgr(namespace): def _catch_extension_load_error(mgr, ep, exc): # Extension raising ExtensionLoadError can be ignored, # and ignore anything we can't import as a safety measure. if isinstance(exc, plugin_base.ExtensionLoadError): LOG.exception(_("Skip loading extension for %s") % ep.name) return if isinstance(exc, ImportError): LOG.error(_("Failed to import extension for %(name)s: " "%(error)s"), {'name': ep.name, 'error': exc}) return raise exc return extension.ExtensionManager( namespace=namespace, invoke_on_load=True, on_load_failure_callback=_catch_extension_load_error, ) def _extensions(self, category, agent_ns=None): namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.%s' % category) return self._get_ext_mgr(namespace) def _extensions_from_builder(self, category, agent_ns=None): ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.builder.%s' % category) mgr = self._get_ext_mgr(ns) def _build(ext): return ext.plugin.get_pollsters_extensions() # NOTE: this seems a stevedore bug. if no extensions are found, # map will raise runtimeError which is not documented. 
if mgr.names(): return list(itertools.chain(*mgr.map(_build))) else: return [] def join_partitioning_groups(self): self.groups = set([self.construct_group_id(d.obj.group_id) for d in self.discovery_manager]) # let each set of statically-defined resources have its own group static_resource_groups = set([ self.construct_group_id(utils.hash_of_set(p.resources)) for p in self.polling_manager.sources if p.resources ]) self.groups.update(static_resource_groups) for group in self.groups: self.partition_coordinator.join_group(group) def create_polling_task(self): """Create an initially empty polling task.""" return PollingTask(self) def setup_polling_tasks(self): polling_tasks = {} for source in self.polling_manager.sources: polling_task = None for pollster in self.extensions: if source.support_meter(pollster.name): polling_task = polling_tasks.get(source.get_interval()) if not polling_task: polling_task = self.create_polling_task() polling_tasks[source.get_interval()] = polling_task polling_task.add(pollster, source) return polling_tasks def construct_group_id(self, discovery_group_id): return ('%s-%s' % (self.group_prefix, discovery_group_id) if discovery_group_id else None) def configure_polling_tasks(self): # allow time for coordination if necessary delay_start = self.partition_coordinator.is_active() # set shuffle time before polling task if necessary delay_polling_time = random.randint( 0, cfg.CONF.shuffle_time_before_polling_task) pollster_timers = [] data = self.setup_polling_tasks() for interval, polling_task in data.items(): delay_time = (interval + delay_polling_time if delay_start else delay_polling_time) pollster_timers.append(self.tg.add_timer(interval, self.interval_task, initial_delay=delay_time, task=polling_task)) self.tg.add_timer(cfg.CONF.coordination.heartbeat, self.partition_coordinator.heartbeat) return pollster_timers def start(self): self.polling_manager = pipeline.setup_polling() self.partition_coordinator.start() self.join_partitioning_groups() self.pollster_timers = self.configure_polling_tasks() self.init_pipeline_refresh() def stop(self): if self.partition_coordinator: self.partition_coordinator.stop() super(AgentManager, self).stop() def interval_task(self, task): # NOTE(sileht): remove the previous keystone client # and exception to get a new one in this polling cycle. 
self._keystone = None self._keystone_last_exception = None task.poll_and_notify() @property def keystone(self): # NOTE(sileht): we do lazy loading of the keystone client # for multiple reasons: # * don't use it if no plugin need it # * use only one client for all plugins per polling cycle if self._keystone is None and self._keystone_last_exception is None: try: self._keystone = keystone_client.get_client() self._keystone_last_exception = None except (ka_exceptions.ClientException, ks_exceptions.ClientException) as e: self._keystone = None self._keystone_last_exception = e if self._keystone is not None: return self._keystone else: raise self._keystone_last_exception @staticmethod def _parse_discoverer(url): s = urlparse.urlparse(url) return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) def _discoverer(self, name): for d in self.discovery_manager: if d.name == name: return d.obj return None def discover(self, discovery=None, discovery_cache=None): resources = [] discovery = discovery or [] for url in discovery: if discovery_cache is not None and url in discovery_cache: resources.extend(discovery_cache[url]) continue name, param = self._parse_discoverer(url) discoverer = self._discoverer(name) if discoverer: try: if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: service_type = getattr( cfg.CONF.service_types, discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) if not keystone_client.get_service_catalog( self.keystone).get_endpoints( service_type=service_type): LOG.warning(_LW( 'Skipping %(name)s, %(service_type)s service ' 'is not registered in keystone'), {'name': name, 'service_type': service_type}) continue discovered = discoverer.discover(self, param) partitioned = self.partition_coordinator.extract_my_subset( self.construct_group_id(discoverer.group_id), discovered) resources.extend(partitioned) if discovery_cache is not None: discovery_cache[url] = partitioned except (ka_exceptions.ClientException, ks_exceptions.ClientException) as e: LOG.error(_LE('Skipping %(name)s, keystone issue: ' '%(exc)s'), {'name': name, 'exc': e}) except Exception as err: LOG.exception(_('Unable to discover resources: %s') % err) else: LOG.warning(_('Unknown discovery extension: %s') % name) return resources def stop_pollsters(self): for x in self.pollster_timers: try: x.stop() self.tg.timer_done(x) except Exception: LOG.error(_('Error stopping pollster.'), exc_info=True) self.pollster_timers = [] def reload_pipeline(self): if self.pipeline_validated: LOG.info(_LI("Reconfiguring polling tasks.")) # stop existing pollsters and leave partitioning groups self.stop_pollsters() for group in self.groups: self.partition_coordinator.leave_group(group) # re-create partitioning groups according to pipeline # and configure polling tasks with latest pipeline conf self.join_partitioning_groups() self.pollster_timers = self.configure_polling_tasks() ceilometer-6.1.5/ceilometer/agent/plugin_base.py0000664000567000056710000002346013072744705023122 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Base class for plugins. """ import abc import collections from oslo_context import context from oslo_log import log import oslo_messaging import six from stevedore import extension from ceilometer.i18n import _LE from ceilometer import messaging LOG = log.getLogger(__name__) ExchangeTopics = collections.namedtuple('ExchangeTopics', ['exchange', 'topics']) class PluginBase(object): """Base class for all plugins.""" @six.add_metaclass(abc.ABCMeta) class NotificationBase(PluginBase): """Base class for plugins that support the notification API.""" def __init__(self, manager): super(NotificationBase, self).__init__() # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch # messages to an endpoint. if self.event_types: self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) self.manager = manager @staticmethod def get_notification_topics(conf): if 'notification_topics' in conf: return conf.notification_topics return conf.oslo_messaging_notifications.topics @abc.abstractproperty def event_types(self): """Return a sequence of strings. Strings are defining the event types to be given to this plugin. """ @abc.abstractmethod def get_targets(self, conf): """Return a sequence of oslo.messaging.Target. Sequence is defining the exchange and topics to be connected for this plugin. :param conf: Configuration. """ @abc.abstractmethod def process_notification(self, message): """Return a sequence of Counter instances for the given message. :param message: Message to process. """ @staticmethod def _consume_and_drop(notifications): """RPC endpoint for useless notification level""" # NOTE(sileht): nothing special todo here, but because we listen # for the generic notification exchange we have to consume all its # queues audit = _consume_and_drop debug = _consume_and_drop warn = _consume_and_drop error = _consume_and_drop critical = _consume_and_drop def info(self, notifications): """RPC endpoint for notification messages at info level When another service sends a notification over the message bus, this method receives it. :param notifications: list of notifications """ self._process_notifications('info', notifications) def sample(self, notifications): """RPC endpoint for notification messages at sample level When another service sends a notification over the message bus at sample priority, this method receives it. :param notifications: list of notifications """ self._process_notifications('sample', notifications) def _process_notifications(self, priority, notifications): for notification in notifications: try: notification = messaging.convert_to_old_notification_format( priority, notification) self.to_samples_and_publish(context.get_admin_context(), notification) except Exception: LOG.error(_LE('Fail to process notification'), exc_info=True) def to_samples_and_publish(self, context, notification): """Return samples produced by *process_notification*. Samples produced for the given notification. :param context: Execution context from the service or RPC call :param notification: The notification to process. """ with self.manager.publisher(context) as p: p(list(self.process_notification(notification))) class NonMetricNotificationBase(object): """Use to mark non-measurement meters There are a number of historical non-measurement meters that should really be captured as events. This common base allows us to disable these invalid meters. 
""" pass class ExtensionLoadError(Exception): """Error of loading pollster plugin. PollsterBase provides a hook, setup_environment, called in pollster loading to setup required HW/SW dependency. Any exception from it would be propagated as ExtensionLoadError, then skip loading this pollster. """ pass class PollsterPermanentError(Exception): """Permanent error when polling. When unrecoverable error happened in polling, pollster can raise this exception with failed resource to prevent itself from polling any more. Resource is one of parameter resources from get_samples that cause polling error. """ def __init__(self, resources): self.fail_res_list = resources @six.add_metaclass(abc.ABCMeta) class PollsterBase(PluginBase): """Base class for plugins that support the polling API.""" def setup_environment(self): """Setup required environment for pollster. Each subclass could overwrite it for specific usage. Any exception raised in this function would prevent pollster being loaded. """ pass def __init__(self): super(PollsterBase, self).__init__() try: self.setup_environment() except Exception as err: raise ExtensionLoadError(err) @abc.abstractproperty def default_discovery(self): """Default discovery to use for this pollster. There are three ways a pollster can get a list of resources to poll, listed here in ascending order of precedence: 1. from the per-agent discovery, 2. from the per-pollster discovery (defined here) 3. from the per-pipeline configured discovery and/or per-pipeline configured static resources. If a pollster should only get resources from #1 or #3, this property should be set to None. """ @abc.abstractmethod def get_samples(self, manager, cache, resources): """Return a sequence of Counter instances from polling the resources. :param manager: The service manager class invoking the plugin. :param cache: A dictionary to allow pollsters to pass data between themselves when recomputing it would be expensive (e.g., asking another service for a list of objects). :param resources: A list of resources the pollster will get data from. It's up to the specific pollster to decide how to use it. It is usually supplied by a discovery, see ``default_discovery`` for more information. """ @classmethod def build_pollsters(cls): """Return a list of tuple (name, pollster). The name is the meter name which the pollster would return, the pollster is a pollster object instance. The pollster which implements this method should be registered in the namespace of ceilometer.builder.xxx instead of ceilometer.poll.xxx. """ return [] @classmethod def get_pollsters_extensions(cls): """Return a list of stevedore extensions. The returned stevedore extensions wrap the pollster object instances returned by build_pollsters. """ extensions = [] try: for name, pollster in cls.build_pollsters(): ext = extension.Extension(name, None, cls, pollster) extensions.append(ext) except Exception as err: raise ExtensionLoadError(err) return extensions @six.add_metaclass(abc.ABCMeta) class DiscoveryBase(object): KEYSTONE_REQUIRED_FOR_SERVICE = None """Service type required in keystone catalog to works""" @abc.abstractmethod def discover(self, manager, param=None): """Discover resources to monitor. The most fine-grained discovery should be preferred, so the work is the most evenly distributed among multiple agents (if they exist). For example: if the pollster can separately poll individual resources, it should have its own discovery implementation to discover those resources. 
If it can only poll per-tenant, then the `TenantDiscovery` should be used. If even that is not possible, use `EndpointDiscovery` (see their respective docstrings). :param manager: The service manager class invoking the plugin. :param param: an optional parameter to guide the discovery """ @property def group_id(self): """Return the group id of this discovery. All running discoveries with the same group_id should return the same set of resources at a given point in time. By default, a discovery is put into a global group, meaning that all discoveries of its type running anywhere in the cloud return the same set of resources. This property can be overridden to provide correct grouping of localized discoveries. For example, compute discovery is localized to a host, which is reflected in its group_id. A None value signifies that this discovery does not want to be part of workload partitioning at all. """ return 'global' ceilometer-6.1.5/ceilometer/agent/__init__.py0000664000567000056710000000000013072744703022350 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/cmd/0000775000567000056710000000000013072745164017720 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/cmd/storage.py0000664000567000056710000000342713072744705021744 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from ceilometer.i18n import _LI from ceilometer import service from ceilometer import storage LOG = logging.getLogger(__name__) def dbsync(): service.prepare_service() storage.get_connection_from_config(cfg.CONF, 'metering').upgrade() storage.get_connection_from_config(cfg.CONF, 'event').upgrade() def expirer(): service.prepare_service() if cfg.CONF.database.metering_time_to_live > 0: LOG.debug("Clearing expired metering data") storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering') storage_conn.clear_expired_metering_data( cfg.CONF.database.metering_time_to_live) else: LOG.info(_LI("Nothing to clean, database metering time to live " "is disabled")) if cfg.CONF.database.event_time_to_live > 0: LOG.debug("Clearing expired event data") event_conn = storage.get_connection_from_config(cfg.CONF, 'event') event_conn.clear_expired_event_data( cfg.CONF.database.event_time_to_live) else: LOG.info(_LI("Nothing to clean, database event time to live " "is disabled")) ceilometer-6.1.5/ceilometer/cmd/collector.py0000664000567000056710000000164613072744705022267 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_service import service as os_service from ceilometer import collector from ceilometer import service CONF = cfg.CONF def main(): service.prepare_service() os_service.launch(CONF, collector.CollectorService(), workers=CONF.collector.workers).wait() ceilometer-6.1.5/ceilometer/cmd/sample.py0000664000567000056710000000625413072744705021562 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Copyright 2012-2014 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating meter for Ceilometer. """ import logging import sys from oslo_config import cfg from oslo_context import context from oslo_utils import timeutils from stevedore import extension from ceilometer import pipeline from ceilometer import sample from ceilometer import service def send_sample(): cfg.CONF.register_cli_opts([ cfg.StrOpt('sample-name', short='n', help='Meter name.', required=True), cfg.StrOpt('sample-type', short='y', help='Meter type (gauge, delta, cumulative).', default='gauge', required=True), cfg.StrOpt('sample-unit', short='U', help='Meter unit.'), cfg.IntOpt('sample-volume', short='l', help='Meter volume value.', default=1), cfg.StrOpt('sample-resource', short='r', help='Meter resource id.', required=True), cfg.StrOpt('sample-user', short='u', help='Meter user id.'), cfg.StrOpt('sample-project', short='p', help='Meter project id.'), cfg.StrOpt('sample-timestamp', short='i', help='Meter timestamp.', default=timeutils.utcnow().isoformat()), cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'), ]) service.prepare_service() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) pipeline_manager = pipeline.setup_pipeline( extension.ExtensionManager('ceilometer.transformer')) with pipeline_manager.publisher(context.get_admin_context()) as p: p([sample.Sample( name=cfg.CONF.sample_name, type=cfg.CONF.sample_type, unit=cfg.CONF.sample_unit, volume=cfg.CONF.sample_volume, user_id=cfg.CONF.sample_user, project_id=cfg.CONF.sample_project, resource_id=cfg.CONF.sample_resource, timestamp=cfg.CONF.sample_timestamp, resource_metadata=cfg.CONF.sample_metadata and eval( cfg.CONF.sample_metadata))]) ceilometer-6.1.5/ceilometer/cmd/api.py0000664000567000056710000000134713072744705021050 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.api import app from ceilometer import service def main(): service.prepare_service() app.build_server() ceilometer-6.1.5/ceilometer/cmd/polling.py0000664000567000056710000000572413072744705021746 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_service import service as os_service from ceilometer.agent import manager from ceilometer.i18n import _LW from ceilometer import service LOG = log.getLogger(__name__) CONF = cfg.CONF class MultiChoicesOpt(cfg.Opt): def __init__(self, name, choices=None, **kwargs): super(MultiChoicesOpt, self).__init__( name, type=DeduplicatedCfgList(choices), **kwargs) self.choices = choices def _get_argparse_kwargs(self, group, **kwargs): """Extends the base argparse keyword dict for multi choices options.""" kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group) kwargs['nargs'] = '+' choices = kwargs.get('choices', self.choices) if choices: kwargs['choices'] = choices return kwargs class DeduplicatedCfgList(cfg.types.List): def __init__(self, choices=None, **kwargs): super(DeduplicatedCfgList, self).__init__(**kwargs) self.choices = choices or [] def __call__(self, *args, **kwargs): result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs) result_set = set(result) if len(result) != len(result_set): LOG.warning(_LW("Duplicated values: %s found in CLI options, " "auto de-duplicated"), result) result = list(result_set) if self.choices and not (result_set <= set(self.choices)): raise Exception('Valid values are %s, but found %s' % (self.choices, result)) return result CLI_OPTS = [ MultiChoicesOpt('polling-namespaces', default=['compute', 'central'], choices=['compute', 'central', 'ipmi'], dest='polling_namespaces', help='Polling namespace(s) to be used while ' 'resource polling'), MultiChoicesOpt('pollster-list', default=[], dest='pollster_list', help='List of pollsters (or wildcard templates) to be ' 'used while polling'), ] CONF.register_cli_opts(CLI_OPTS) def main(): service.prepare_service() os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces, CONF.pollster_list)).wait() ceilometer-6.1.5/ceilometer/cmd/agent_notification.py0000664000567000056710000000166213072744705024143 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_service import service as os_service from ceilometer import notification from ceilometer import service CONF = cfg.CONF def main(): service.prepare_service() os_service.launch(CONF, notification.NotificationService(), workers=CONF.notification.workers).wait() ceilometer-6.1.5/ceilometer/cmd/__init__.py0000664000567000056710000000000013072744703022015 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/middleware.py0000664000567000056710000000511713072744706021651 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample cfg.CONF.import_opt('nova_control_exchange', 'ceilometer.compute.notifications') cfg.CONF.import_opt('glance_control_exchange', 'ceilometer.notification') cfg.CONF.import_opt('neutron_control_exchange', 'ceilometer.network.notifications') cfg.CONF.import_opt('cinder_control_exchange', 'ceilometer.notification') OPTS = [ cfg.MultiStrOpt('http_control_exchanges', default=[cfg.CONF.nova_control_exchange, cfg.CONF.glance_control_exchange, cfg.CONF.neutron_control_exchange, cfg.CONF.cinder_control_exchange], help="Exchanges name to listen for notifications."), ] cfg.CONF.register_opts(OPTS) class HTTPRequest(plugin_base.NotificationBase, plugin_base.NonMetricNotificationBase): event_types = ['http.request'] def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=exchange) for topic in self.get_notification_topics(conf) for exchange in conf.http_control_exchanges] def process_notification(self, message): yield sample.Sample.from_notification( name=message['event_type'], type=sample.TYPE_DELTA, volume=1, unit=message['event_type'].split('.')[1], user_id=message['payload']['request'].get('HTTP_X_USER_ID'), project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), resource_id=message['payload']['request'].get( 'HTTP_X_SERVICE_NAME'), message=message) class HTTPResponse(HTTPRequest): event_types = ['http.response'] ceilometer-6.1.5/ceilometer/event/0000775000567000056710000000000013072745164020276 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/event/trait_plugins.py0000664000567000056710000002061413072744703023535 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from debtcollector import moves from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _LW LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class TraitPluginBase(object): """Base class for plugins. It converts notification fields to Trait values. """ support_return_all_values = False """If True, an exception will be raised if the user expects the plugin to return one trait per match_list, but the plugin doesn't allow/support that. """ def __init__(self, **kw): """Set up the trait plugin. For each trait definition that a plugin is used on in a conversion definition, a new instance of the plugin will be created and initialized with the parameters (if any) specified in the config file. :param kw: the parameters specified in the event definitions file. """ super(TraitPluginBase, self).__init__() @moves.moved_method('trait_values', version=6.0, removal_version="?") def trait_value(self, match_list): pass def trait_values(self, match_list): """Convert a set of fields to one or multiple Trait values. This method is called each time a trait is attempted to be extracted from a notification. It will be called *even if* no matching fields are found in the notification (in that case, the match_list will be empty). If this method returns None, the trait *will not* be added to the event. Any other value returned by this method will be used as the value for the trait. Values returned will be coerced to the appropriate type for the trait. :param match_list: A list (may be empty if no matches) of *tuples*. Each tuple is (field_path, value) where field_path is the jsonpath for that specific field. Example:: trait's fields definition: ['payload.foobar', 'payload.baz', 'payload.thing.*'] notification body: { 'message_id': '12345', 'publisher': 'someservice.host', 'payload': { 'foobar': 'test', 'thing': { 'bar': 12, 'boing': 13, } } } match_list will be: [('payload.foobar','test'), ('payload.thing.bar',12), ('payload.thing.boing',13)] Here is a plugin that emulates the default (no plugin) behavior: .. code-block:: python class DefaultPlugin(TraitPluginBase): "Plugin that returns the field values." def __init__(self, **kw): super(DefaultPlugin, self).__init__() def trait_values(self, match_list): if not match_list: return None return [match[1] for match in match_list] """ # For backwards compatibility for the renamed method. return [self.trait_value(match_list)] class SplitterTraitPlugin(TraitPluginBase): """Plugin that splits a piece off of a string value.""" support_return_all_values = True def __init__(self, separator=".", segment=0, max_split=None, **kw): """Set up how to split the field. :param separator: String to split on. default "." :param segment: Which segment to return. (int) default 0 :param max_split: Limit number of splits.
Default: None (no limit) """ LOG.warning(_LW('split plugin is deprecated, ' 'add ".`split(%(sep)s, %(segment)d, ' '%(max_split)d)`" to your jsonpath instead') % dict(sep=separator, segment=segment, max_split=(-1 if max_split is None else max_split))) self.separator = separator self.segment = segment self.max_split = max_split super(SplitterTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): return [self._trait_value(match) for match in match_list] def _trait_value(self, match): value = six.text_type(match[1]) if self.max_split is not None: values = value.split(self.separator, self.max_split) else: values = value.split(self.separator) try: return values[self.segment] except IndexError: return None class BitfieldTraitPlugin(TraitPluginBase): """Plugin to set flags on a bitfield.""" def __init__(self, initial_bitfield=0, flags=None, **kw): """Set up the bitfield trait. :param initial_bitfield: (int) initial value for the bitfield Flags that are set will be OR'ed with this. :param flags: List of dictionaries defining bitflags to set depending on data in the notification. Each one has the following keys: path: jsonpath of field to match. bit: (int) number of bit to set (lsb is bit 0) value: set bit if corresponding field's value matches this. If value is not provided, bit will be set if the field exists (and is non-null), regardless of its value. """ self.initial_bitfield = initial_bitfield if flags is None: flags = [] self.flags = flags super(BitfieldTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): matches = dict(match_list) bitfield = self.initial_bitfield for flagdef in self.flags: path = flagdef['path'] bit = 2 ** int(flagdef['bit']) if path in matches: if 'value' in flagdef: if matches[path] == flagdef['value']: bitfield |= bit else: bitfield |= bit return [bitfield] class TimedeltaPluginMissedFields(Exception): def __init__(self): msg = ('Two timestamp fields are required to use the Timedelta ' 'plugin.') super(TimedeltaPluginMissedFields, self).__init__(msg) class TimedeltaPlugin(TraitPluginBase): """Compute a timedelta meter volume from two timestamp fields. Example:: trait's fields definition: ['payload.created_at', 'payload.launched_at'] the value is created as the total seconds between the 'launched_at' and 'created_at' timestamps. """ # TODO(idegtiarov): refactor code to have meter_plugins separate from # trait_plugins def trait_value(self, match_list): if len(match_list) != 2: LOG.warning(_LW('Timedelta plugin requires two timestamp fields' ' to create a timedelta value.')) return start, end = match_list try: start_time = timeutils.parse_isotime(start[1]) end_time = timeutils.parse_isotime(end[1]) except Exception as err: LOG.warning(_LW('Failed to parse date from set fields, both ' 'fields %(start)s and %(end)s must be datetime: ' '%(err)s') % dict(start=start[0], end=end[0], err=err) ) return return abs((end_time - start_time).total_seconds()) ceilometer-6.1.5/ceilometer/event/converter.py0000664000567000056710000003040413072744705022660 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from debtcollector import moves from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer import declarative from ceilometer.event.storage import models from ceilometer.i18n import _ from ceilometer import utils OPTS = [ cfg.StrOpt('definitions_cfg_file', default="event_definitions.yaml", help="Configuration file for event definitions." ), cfg.BoolOpt('drop_unmatched_notifications', default=False, help='Drop notifications if no event definition matches. ' '(Otherwise, we convert them with just the default traits)'), cfg.MultiStrOpt('store_raw', default=[], help='Store the raw notification for select priority ' 'levels (info and/or error). By default, raw details are ' 'not captured.') ] cfg.CONF.register_opts(OPTS, group='event') LOG = log.getLogger(__name__) EventDefinitionException = moves.moved_class(declarative.DefinitionException, 'EventDefinitionException', __name__, version=6.0, removal_version="?") class TraitDefinition(declarative.Definition): def __init__(self, name, trait_cfg, plugin_manager): super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager) type_name = (trait_cfg.get('type', 'text') if isinstance(trait_cfg, dict) else 'text') self.trait_type = models.Trait.get_type_by_name(type_name) if self.trait_type is None: raise declarative.DefinitionException( _("Invalid trait type '%(type)s' for trait %(trait)s") % dict(type=type_name, trait=name), self.cfg) def to_trait(self, notification_body): value = self.parse(notification_body) if value is None: return None # NOTE(mdragon): some openstack projects (mostly Nova) emit '' # for null fields for things like dates. if self.trait_type != models.Trait.TEXT_TYPE and value == '': return None value = models.Trait.convert_value(self.trait_type, value) return models.Trait(self.name, self.trait_type, value) class EventDefinition(object): DEFAULT_TRAITS = dict( service=dict(type='text', fields='publisher_id'), request_id=dict(type='text', fields='_context_request_id'), project_id=dict(type='text', fields=['payload.tenant_id', '_context_tenant']), user_id=dict(type='text', fields=['payload.user_id', '_context_user_id']), # TODO(dikonoor):tenant_id is old terminology and should # be deprecated tenant_id=dict(type='text', fields=['payload.tenant_id', '_context_tenant']), ) def __init__(self, definition_cfg, trait_plugin_mgr): self._included_types = [] self._excluded_types = [] self.traits = dict() self.cfg = definition_cfg self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw] try: event_type = definition_cfg['event_type'] traits = definition_cfg['traits'] except KeyError as err: raise declarative.DefinitionException( _("Required field %s not specified") % err.args[0], self.cfg) if isinstance(event_type, six.string_types): event_type = [event_type] for t in event_type: if t.startswith('!'): self._excluded_types.append(t[1:]) else: self._included_types.append(t) if self._excluded_types and not self._included_types: self._included_types.append('*') for trait_name in self.DEFAULT_TRAITS: self.traits[trait_name] = TraitDefinition( trait_name, self.DEFAULT_TRAITS[trait_name], trait_plugin_mgr) for trait_name in traits: self.traits[trait_name] = TraitDefinition( trait_name, traits[trait_name], trait_plugin_mgr) def included_type(self, event_type): for t in self._included_types: if utils.match(event_type, t): return True return False def excluded_type(self, 
event_type): for t in self._excluded_types: if utils.match(event_type, t): return True return False def match_type(self, event_type): return (self.included_type(event_type) and not self.excluded_type(event_type)) @property def is_catchall(self): return '*' in self._included_types and not self._excluded_types @staticmethod def _extract_when(body): """Extract the generated datetime from the notification.""" # NOTE: I am keeping the logic the same as it was in the collector. # However, *ALL* notifications should have a 'timestamp' field, it's # part of the notification envelope spec. If this was put here because # some openstack project is generating notifications without a # timestamp, then that needs to be filed as a bug with the offending # project (mdragon) when = body.get('timestamp', body.get('_context_timestamp')) if when: return timeutils.normalize_time(timeutils.parse_isotime(when)) return timeutils.utcnow() def to_event(self, notification_body): event_type = notification_body['event_type'] message_id = notification_body['message_id'] when = self._extract_when(notification_body) traits = (self.traits[t].to_trait(notification_body) for t in self.traits) # Only accept non-None value traits ... traits = [trait for trait in traits if trait is not None] raw = (notification_body if notification_body.get('priority') in self.raw_levels else {}) event = models.Event(message_id, event_type, when, traits, raw) return event class NotificationEventsConverter(object): """Notification Event Converter The NotificationEventsConverter handles the conversion of Notifications from openstack systems into Ceilometer Events. The conversion is handled according to event definitions in a config file. The config is a list of event definitions. Order is significant: a notification will be processed according to the LAST definition that matches its event_type. (We use the last matching definition because that allows you to use YAML merge syntax in the definitions file.) Each definition is a dictionary with the following keys (all are required): - event_type: this is a list of notification event_types this definition will handle. These can be wildcarded with unix shell glob (not regex!) wildcards. An exclusion listing (starting with a '!') will exclude any types listed from matching. If ONLY exclusions are listed, the definition will match anything not matching the exclusions. This item can also be a string, which will be taken as equivalent to a 1-item list. Examples: * ['compute.instance.exists'] will only match compute.instance.exists notifications * "compute.instance.exists" Same as above. * ["image.create", "image.delete"] will match image.create and image.delete, but not anything else. * "compute.instance.*" will match compute.instance.create.start but not image.upload * ['*.start','*.end', '!scheduler.*'] will match compute.instance.create.start, and image.delete.end, but NOT compute.instance.exists or scheduler.run_instance.start * '!image.*' matches any notification except image notifications. * ['*', '!image.*'] same as above. - traits: (dict) The keys are trait names, the values are the trait definitions. Each trait definition is a dictionary with the following keys: - type (optional): The data type for this trait. (as a string) Valid options are: 'text', 'int', 'float' and 'datetime', defaults to 'text' if not specified. - fields: a path specification for the field(s) in the notification you wish to extract. The paths can be specified with a dot syntax (e.g. 'payload.host') or dictionary syntax (e.g.
'payload[host]') is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so:: "payload.image_meta.'org.openstack__1__architecture'" The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for more info) Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null (i.e. is not None) values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish) This configuration value is normally a string, for convenience, it can be specified as a list of specifications, which will be OR'ed together (a union query in jsonpath terms) - plugin (optional): (dictionary) with the following keys: - name: (string) name of a plugin to load - parameters: (optional) Dictionary of keyword args to pass to the plugin on initialization. See documentation on each plugin to see what arguments it accepts. For convenience, this value can also be specified as a string, which is interpreted as a plugin name, which will be loaded with no parameters. """ def __init__(self, events_config, trait_plugin_mgr, add_catchall=True): self.definitions = [ EventDefinition(event_def, trait_plugin_mgr) for event_def in reversed(events_config)] if add_catchall and not any(d.is_catchall for d in self.definitions): event_def = dict(event_type='*', traits={}) self.definitions.append(EventDefinition(event_def, trait_plugin_mgr)) def to_event(self, notification_body): event_type = notification_body['event_type'] message_id = notification_body['message_id'] edef = None for d in self.definitions: if d.match_type(event_type): edef = d break if edef is None: msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') % dict(type=event_type, msgid=message_id)) if cfg.CONF.event.drop_unmatched_notifications: LOG.debug(msg) else: # If drop_unmatched_notifications is False, this should # never happen. (mdragon) LOG.error(msg) return None return edef.to_event(notification_body) def setup_events(trait_plugin_mgr): """Setup the event definitions from yaml config file.""" return NotificationEventsConverter( declarative.load_definitions([], cfg.CONF.event.definitions_cfg_file), trait_plugin_mgr, add_catchall=not cfg.CONF.event.drop_unmatched_notifications) ceilometer-6.1.5/ceilometer/event/endpoint.py0000664000567000056710000000527213072744705022476 0ustar jenkinsjenkins00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
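# NOTE: illustrative sketch only, not part of the original module. The
# event definition format documented in converter.py above is plain YAML;
# once loaded it is simply a list of dicts. The event type and trait
# names below are hypothetical.
_EXAMPLE_EVENT_DEFINITIONS = [
    {'event_type': ['compute.instance.create.*', '!*.error'],
     'traits': {'instance_id': {'fields': 'payload.instance_id'},
                'memory_mb': {'type': 'int',
                              'fields': 'payload.memory_mb'}}},
]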
import logging from oslo_config import cfg from oslo_context import context import oslo_messaging from stevedore import extension from ceilometer.event import converter as event_converter from ceilometer.i18n import _LE from ceilometer import messaging LOG = logging.getLogger(__name__) class EventsNotificationEndpoint(object): def __init__(self, manager): super(EventsNotificationEndpoint, self).__init__() LOG.debug('Loading event definitions') self.ctxt = context.get_admin_context() self.event_converter = event_converter.setup_events( extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) self.manager = manager def info(self, notifications): """Convert message at info level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notification('info', notifications) def error(self, notifications): """Convert message at error level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notification('error', notifications) def process_notification(self, priority, notifications): for notification in notifications: # NOTE: the rpc layer currently rips out the notification # delivery_info, which is critical to determining the # source of the notification. This will have to get added back # later. notification = messaging.convert_to_old_notification_format( priority, notification) try: event = self.event_converter.to_event(notification) if event is not None: with self.manager.publisher(self.ctxt) as p: p(event) except Exception: if not cfg.CONF.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE LOG.error(_LE('Fail to process a notification'), exc_info=True) return oslo_messaging.NotificationResult.HANDLED ceilometer-6.1.5/ceilometer/event/storage/0000775000567000056710000000000013072745164021742 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/event/storage/impl_db2.py0000664000567000056710000000547713072744705024021 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """DB2 storage backend """ import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage from ceilometer.storage.mongo import utils as pymongo_utils class Connection(pymongo_base.Connection): """The db2 event storage for Ceilometer.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, url): # Since we are using pymongo, even though we are connecting to DB2 # we still have to make sure that the scheme which used to distinguish # db2 driver from mongodb driver be replaced so that pymongo will not # produce an exception on the scheme. url = url.replace('db2:', 'mongodb:', 1) self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.2 to use aggregate(), since we are using mongodb # as backend for test, the following code is necessary to make sure # that the test wont try aggregate on older mongodb during the test. 
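        # (Illustrative, not in the original source: a real MongoDB
        # server_info() reply includes e.g. 'versionArray': [2, 4, 9, 0],
        # which is what the version check below compares against.)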
# For db2, the versionArray won't be part of the server_info, so there # will not be exception when real db2 gets used as backend. server_info = self.conn.server_info() if server_info.get('sysInfo'): self._using_mongodb = True else: self._using_mongodb = False if self._using_mongodb and server_info.get('versionArray') < [2, 2]: raise storage.StorageBadVersion("Need at least MongoDB 2.2") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) self.upgrade() def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): self.db.conn.create_collection('event') def clear(self): # drop_database command does nothing on db2 database since this has # not been implemented. However calling this method is important for # removal of all the empty dbs created during the test runs since # test run is against mongodb on Jenkins self.conn.drop_database(self.db.name) self.conn.close() ceilometer-6.1.5/ceilometer/event/storage/impl_hbase.py0000664000567000056710000002114413072744705024421 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_log import log from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import utils as hbase_utils from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the event data into a HBase database Collections: - events: - row_key: timestamp of event's generation + uuid of event in format: "%s:%s" % (ts, Event.message_id) - Column Families: f: contains the following qualifiers: - event_type: description of event's type - timestamp: time stamp of event generation - all traits for this event in format: .. 
code-block:: python "%s:%s" % (trait_name, trait_type) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None EVENT_TABLE = "event" def __init__(self, url): super(Connection, self).__init__(url) def upgrade(self): tables = [self.EVENT_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.EVENT_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_events(self, event_models): """Write the events to HBase. :param event_models: a list of models.Event objects. """ error = None with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) for event_model in event_models: # The row key consists of the timestamp and the message_id from # models.Event, so that stored events are sorted by timestamp in # the database. ts = event_model.generated row = hbase_utils.prepare_key( hbase_utils.timestamp(ts, reverse=False), event_model.message_id) event_type = event_model.event_type traits = {} if event_model.traits: for trait in event_model.traits: key = hbase_utils.prepare_key(trait.name, trait.dtype) traits[key] = trait.value record = hbase_utils.serialize_entry(traits, event_type=event_type, timestamp=ts, raw=event_model.raw) try: events_table.put(row, record) except Exception as ex: LOG.exception(_LE("Failed to record event: %s") % ex) error = ex if error: raise error def get_events(self, event_filter, limit=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. """ if limit == 0: return q, start, stop = hbase_utils.make_events_query_from_filter( event_filter) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit) for event_id, data in gen: traits = [] events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_dtype = key traits.append(models.Trait(name=trait_name, dtype=int(trait_dtype), value=value)) ts, mess = event_id.split(':') yield models.Event( message_id=hbase_utils.unquote(mess), event_type=events_dict['event_type'], generated=events_dict['timestamp'], traits=sorted(traits, key=operator.attrgetter('dtype')), raw=events_dict['raw'] ) def get_event_types(self): """Return all event types as an iterable of strings.""" with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan() event_types = set() for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if not isinstance(key, tuple) and key.startswith('event_type'): if value not in event_types: event_types.add(value) yield value def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned.
:param event_type: the type of the Event """ q = hbase_utils.make_query(event_type=event_type) trait_names = set() with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key if trait_name not in trait_names: # Here we check that our method return only unique # trait types, for ex. if it is found the same trait # types in different events with equal event_type, # method will return only one trait type. It is # proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) data_type = models.Trait.type_names[int(trait_type)] yield {'name': trait_name, 'data_type': data_type} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ q = hbase_utils.make_query(event_type=event_type, trait_type=trait_type) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key yield models.Trait(name=trait_name, dtype=int(trait_type), value=value) ceilometer-6.1.5/ceilometer/event/storage/impl_mongodb.py0000664000567000056710000000640613072744705024770 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """MongoDB storage backend""" from oslo_config import cfg from oslo_log import log import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage from ceilometer.storage import impl_mongodb from ceilometer.storage.mongo import utils as pymongo_utils LOG = log.getLogger(__name__) class Connection(pymongo_base.Connection): """Put the event data into a MongoDB database.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, url): # NOTE(jd) Use our own connection pooling on top of the Pymongo one. # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. 
self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.4 to use $setOnInsert if self.conn.server_info()['versionArray'] < [2, 4]: raise storage.StorageBadVersion("Need at least MongoDB 2.4") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): self.db.conn.create_collection('event') # Establish indexes # NOTE(idegtiarov): This indexes cover get_events, get_event_types, and # get_trait_types requests based on event_type and timestamp fields. self.db.event.create_index( [('event_type', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING)], name='event_type_idx' ) ttl = cfg.CONF.database.event_time_to_live impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ LOG.debug("Clearing expired event data is based on native " "MongoDB time to live feature and going in background.") ceilometer-6.1.5/ceilometer/event/storage/impl_log.py0000664000567000056710000000203513072744705024116 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.event.storage import base from ceilometer.i18n import _LI LOG = log.getLogger(__name__) class Connection(base.Connection): """Log event data.""" @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ LOG.info(_LI("Dropping event data with TTL %d"), ttl) ceilometer-6.1.5/ceilometer/event/storage/impl_elasticsearch.py0000664000567000056710000002776013072744705026163 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
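# NOTE: illustrative sketch only, not part of the original module. For
# contrast with the per-day ElasticSearch indexing documented below, the
# pymongo-based drivers above store each event as a single document; based
# on record_events() in pymongo_base.py later in this package, a stored
# document is shaped roughly like this (all values hypothetical):
_EXAMPLE_MONGO_EVENT_DOC = {
    '_id': 'dc90e464-65ab-4a5d-bf66-ecb956b5d779',  # event message_id
    'event_type': 'compute.instance.exists',
    'timestamp': '2014-10-21T20:02:09',  # actually stored as a datetime
    'traits': [{'trait_name': 'instance_id',
                'trait_type': 1,  # a models.Trait data type code
                'trait_value': '18c97ba1-3b74-441a-b948-a702a30cbce2'}],
    'raw': {},
}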
import datetime import operator import elasticsearch as es from elasticsearch import helpers from oslo_log import log from oslo_utils import netutils from oslo_utils import timeutils import six from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LI from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Put the event data into an ElasticSearch db. Events in ElasticSearch are indexed by day and stored by event_type. An example document:: {"_index":"events_2014-10-21", "_type":"event_type0", "_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779", "_score":1.0, "_source":{"timestamp": "2014-10-21T20:02:09.274797" "traits": {"id4_0": "2014-10-21T20:02:09.274797", "id3_0": 0.7510790937279408, "id2_0": 5, "id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"} } } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) index_name = 'events' # NOTE(gordc): mainly for testing, data is not searchable after write, # it is only searchable after periodic refreshes. _refresh_on_write = False def __init__(self, url): url_split = netutils.urlsplit(url) self.conn = es.Elasticsearch(url_split.netloc) def upgrade(self): iclient = es.client.IndicesClient(self.conn) ts_template = { 'template': '*', 'mappings': {'_default_': {'_timestamp': {'enabled': True, 'store': True}, 'properties': {'traits': {'type': 'nested'}}}}} iclient.put_template(name='enable_timestamp', body=ts_template) def record_events(self, events): def _build_bulk_index(event_list): for ev in event_list: traits = {t.name: t.value for t in ev.traits} yield {'_op_type': 'create', '_index': '%s_%s' % (self.index_name, ev.generated.date().isoformat()), '_type': ev.event_type, '_id': ev.message_id, '_source': {'timestamp': ev.generated.isoformat(), 'traits': traits, 'raw': ev.raw}} error = None for ok, result in helpers.streaming_bulk( self.conn, _build_bulk_index(events)): if not ok: __, result = result.popitem() if result['status'] == 409: LOG.info(_LI('Duplicate event detected, skipping it: %s') % result) else: LOG.exception(_LE('Failed to record event: %s') % result) error = storage.StorageUnknownWriteError(result) if self._refresh_on_write: self.conn.indices.refresh(index='%s_*' % self.index_name) while self.conn.cluster.pending_tasks(local=True)['tasks']: pass if error: raise error def _make_dsl_from_filter(self, indices, ev_filter): q_args = {} filters = [] if ev_filter.start_timestamp: filters.append({'range': {'timestamp': {'ge': ev_filter.start_timestamp.isoformat()}}}) while indices[0] < ( '%s_%s' % (self.index_name, ev_filter.start_timestamp.date().isoformat())): del indices[0] if ev_filter.end_timestamp: filters.append({'range': {'timestamp': {'le': ev_filter.end_timestamp.isoformat()}}}) while indices[-1] > ( '%s_%s' % (self.index_name, ev_filter.end_timestamp.date().isoformat())): del indices[-1] q_args['index'] = indices if ev_filter.event_type: q_args['doc_type'] = ev_filter.event_type if ev_filter.message_id: filters.append({'term': {'_id': ev_filter.message_id}}) if ev_filter.traits_filter or ev_filter.admin_proj: trait_filters = [] or_cond = [] for t_filter in ev_filter.traits_filter or []: value = None for val_type 
in ['integer', 'string', 'float', 'datetime']: if t_filter.get(val_type): value = t_filter.get(val_type) if isinstance(value, six.string_types): value = value.lower() elif isinstance(value, datetime.datetime): value = value.isoformat() break if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']: op = (t_filter.get('op').replace('ge', 'gte') .replace('le', 'lte')) trait_filters.append( {'range': {t_filter['key']: {op: value}}}) else: tf = {"query": {"query_string": { "query": "%s: \"%s\"" % (t_filter['key'], value)}}} if t_filter.get('op') == 'ne': tf = {"not": tf} trait_filters.append(tf) if ev_filter.admin_proj: or_cond = [{'missing': {'field': 'project_id'}}, {'term': {'project_id': ev_filter.admin_proj}}] filters.append( {'nested': {'path': 'traits', 'query': {'filtered': { 'filter': {'bool': {'must': trait_filters, 'should': or_cond}}}}}}) q_args['body'] = {'query': {'filtered': {'filter': {'bool': {'must': filters}}}}} return q_args def get_events(self, event_filter, limit=None): if limit == 0: return iclient = es.client.IndicesClient(self.conn) indices = iclient.get_mapping('%s_*' % self.index_name).keys() if indices: filter_args = self._make_dsl_from_filter(indices, event_filter) if limit is not None: filter_args['size'] = limit results = self.conn.search(fields=['_id', 'timestamp', '_type', '_source'], sort='timestamp:asc', **filter_args) trait_mappings = {} for record in results['hits']['hits']: trait_list = [] if not record['_type'] in trait_mappings: trait_mappings[record['_type']] = list( self.get_trait_types(record['_type'])) for key in record['_source']['traits'].keys(): value = record['_source']['traits'][key] for t_map in trait_mappings[record['_type']]: if t_map['name'] == key: dtype = t_map['data_type'] break else: dtype = models.Trait.TEXT_TYPE trait_list.append(models.Trait( name=key, dtype=dtype, value=models.Trait.convert_value(dtype, value))) gen_ts = timeutils.normalize_time(timeutils.parse_isotime( record['_source']['timestamp'])) yield models.Event(message_id=record['_id'], event_type=record['_type'], generated=gen_ts, traits=sorted( trait_list, key=operator.attrgetter('dtype')), raw=record['_source']['raw']) def get_event_types(self): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = set() for index in es_mappings.keys(): for ev_type in es_mappings[index]['mappings'].keys(): seen_types.add(ev_type) # TODO(gordc): tests assume sorted ordering but backends are not # explicitly ordered. 
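        # (Illustrative, not in the original source: the mapping keys
        # iterated here are the per-day indices, e.g. 'events_2014-10-21',
        # and every event_type stored on that day appears as a document
        # type under its index.)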
# NOTE: _default_ is a type that appears in all mappings but is not # real 'type' seen_types.discard('_default_') return sorted(list(seen_types)) @staticmethod def _remap_es_types(d_type): if d_type == 'string': d_type = 'text' elif d_type == 'long': d_type = 'int' elif d_type == 'double': d_type = 'float' elif d_type == 'date' or d_type == 'date_time': d_type = 'datetime' return d_type def get_trait_types(self, event_type): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = [] for index in es_mappings.keys(): # if event_type exists in index and has traits if (es_mappings[index]['mappings'].get(event_type) and es_mappings[index]['mappings'][event_type]['properties'] ['traits'].get('properties')): for t_type in (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'].keys()): d_type = (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'] [t_type]['type']) d_type = models.Trait.get_type_by_name( self._remap_es_types(d_type)) if (t_type, d_type) not in seen_types: yield {'name': t_type, 'data_type': d_type} seen_types.append((t_type, d_type)) def get_traits(self, event_type, trait_type=None): t_types = dict((res['name'], res['data_type']) for res in self.get_trait_types(event_type)) if not t_types or (trait_type and trait_type not in t_types.keys()): return result = self.conn.search('%s_*' % self.index_name, event_type) for ev in result['hits']['hits']: if trait_type and ev['_source']['traits'].get(trait_type): yield models.Trait( name=trait_type, dtype=t_types[trait_type], value=models.Trait.convert_value( t_types[trait_type], ev['_source']['traits'][trait_type])) else: for trait in ev['_source']['traits'].keys(): yield models.Trait( name=trait, dtype=t_types[trait], value=models.Trait.convert_value( t_types[trait], ev['_source']['traits'][trait])) ceilometer-6.1.5/ceilometer/event/storage/base.py0000664000567000056710000000623313072744705023232 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ceilometer class Connection(object): """Base class for event storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'events': {'query': {'simple': False}}, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, url): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def clear(): """Clear database.""" @staticmethod def record_events(events): """Write the events to the backend storage system. :param events: a list of model.Event objects. 
""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_events(event_filter, limit=None): """Return an iterable of model.Event objects.""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_event_types(): """Return all event types as an iterable of strings.""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_trait_types(event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_traits(event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ raise ceilometer.NotImplementedError('Events not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ raise ceilometer.NotImplementedError('Clearing events not implemented') ceilometer-6.1.5/ceilometer/event/storage/pymongo_base.py0000664000567000056710000001353213072744706025003 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB and DB2 backends """ from oslo_log import log import pymongo from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LI from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer import utils LOG = log.getLogger(__name__) COMMON_AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base event Connection class for MongoDB and DB2 drivers.""" CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def record_events(self, event_models): """Write the events to database. :param event_models: a list of models.Event objects. 
""" error = None for event_model in event_models: traits = [] if event_model.traits: for trait in event_model.traits: traits.append({'trait_name': trait.name, 'trait_type': trait.dtype, 'trait_value': trait.value}) try: self.db.event.insert_one( {'_id': event_model.message_id, 'event_type': event_model.event_type, 'timestamp': event_model.generated, 'traits': traits, 'raw': event_model.raw}) except pymongo.errors.DuplicateKeyError as ex: LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex) except Exception as ex: LOG.exception(_LE("Failed to record event: %s") % ex) error = ex if error: raise error def get_events(self, event_filter, limit=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. :param limit: Maximum number of results to return. """ if limit == 0: return q = pymongo_utils.make_events_query_from_filter(event_filter) if limit is not None: results = self.db.event.find(q, limit=limit) else: results = self.db.event.find(q) for event in results: traits = [] for trait in event['traits']: traits.append(models.Trait(name=trait['trait_name'], dtype=int(trait['trait_type']), value=trait['trait_value'])) yield models.Event(message_id=event['_id'], event_type=event['event_type'], generated=event['timestamp'], traits=traits, raw=event.get('raw')) def get_event_types(self): """Return all event types as an iter of strings.""" return self.db.event.distinct('event_type') def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event. """ trait_names = set() events = self.db.event.find({'event_type': event_type}) for event in events: for trait in event['traits']: trait_name = trait['trait_name'] if trait_name not in trait_names: # Here we check that our method return only unique # trait types. Method will return only one trait type. It # is proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) yield {'name': trait_name, 'data_type': trait['trait_type']} def get_traits(self, event_type, trait_name=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_name: the name of the Trait to filter by """ if not trait_name: events = self.db.event.find({'event_type': event_type}) else: # We choose events that simultaneously have event_type and certain # trait_name, and retrieve events contains only mentioned traits. events = self.db.event.find({'$and': [{'event_type': event_type}, {'traits.trait_name': trait_name}]}, {'traits': {'$elemMatch': {'trait_name': trait_name}} }) for event in events: for trait in event['traits']: yield models.Trait(name=trait['trait_name'], dtype=trait['trait_type'], value=trait['trait_value']) ceilometer-6.1.5/ceilometer/event/storage/impl_sqlalchemy.py0000664000567000056710000004562513072744706025514 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" from __future__ import absolute_import import datetime import os from oslo_config import cfg from oslo_db import exception as dbexc from oslo_db.sqlalchemy import session as db_session from oslo_log import log from oslo_utils import timeutils import sqlalchemy as sa from ceilometer.event.storage import base from ceilometer.event.storage import models as api_models from ceilometer.i18n import _LE, _LI from ceilometer import storage from ceilometer.storage.sqlalchemy import models from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText), (api_models.Trait.TEXT_TYPE, models.TraitText), (api_models.Trait.INT_TYPE, models.TraitInt), (api_models.Trait.FLOAT_TYPE, models.TraitFloat), (api_models.Trait.DATETIME_TYPE, models.TraitDatetime)] TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST) TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST) trait_models_dict = {'string': models.TraitText, 'integer': models.TraitInt, 'datetime': models.TraitDatetime, 'float': models.TraitFloat} def _build_trait_query(session, trait_type, key, value, op='eq'): trait_model = trait_models_dict[trait_type] op_dict = {'eq': (trait_model.value == value), 'lt': (trait_model.value < value), 'le': (trait_model.value <= value), 'gt': (trait_model.value > value), 'ge': (trait_model.value >= value), 'ne': (trait_model.value != value)} conditions = [trait_model.key == key, op_dict[op]] return (session.query(trait_model.event_id.label('ev_id')) .filter(*conditions)) class Connection(base.Connection): """Put the event data into a SQLAlchemy database. 
Tables:: - EventType - event definition - { id: event type id desc: description of event } - Event - event data - { id: event id message_id: message id generated = timestamp of event event_type_id = event type -> eventtype.id } - TraitInt - int trait value - { event_id: event -> event.id key: trait name value: integer value } - TraitDatetime - datetime trait value - { event_id: event -> event.id key: trait name value: datetime value } - TraitText - text trait value - { event_id: event -> event.id key: trait name value: text value } - TraitFloat - float trait value - { event_id: event -> event.id key: trait name value: float value } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, url): # Set max_retries to 0, since oslo.db may otherwise retry the db # connection up to max_retries ^ 2 times on failure; db reconnection # is already implemented in the # storage.__init__.get_connection_from_config function options = dict(cfg.CONF.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Ceilometer for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): # NOTE(gordc): to minimise memory, only import migration when needed from oslo_db.sqlalchemy import migration path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'storage', 'sqlalchemy', 'migrate_repo') engine = self._engine_facade.get_engine() from migrate import exceptions as migrate_exc from migrate.versioning import api from migrate.versioning import repository repo = repository.Repository(path) try: api.db_version(engine, repo) except migrate_exc.DatabaseNotControlledError: models.Base.metadata.create_all(engine) api.version_control(engine, repo, repo.latest) else: migration.db_sync(engine, path) def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): engine.execute(table.delete()) engine.dispose() def _get_or_create_event_type(self, event_type, session=None): """Check if an event type with the supplied name already exists. If not, create it and return the record. This may result in a flush. """ try: if session is None: session = self._engine_facade.get_session() with session.begin(subtransactions=True): et = session.query(models.EventType).filter( models.EventType.desc == event_type).first() if not et: et = models.EventType(event_type) session.add(et) except dbexc.DBDuplicateEntry: et = self._get_or_create_event_type(event_type, session) return et def record_events(self, event_models): """Write the events to the SQL database via sqlalchemy. :param event_models: a list of model.Event objects.
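Traits are grouped by data type and bulk-inserted into the matching
Trait* table; each inserted row is roughly {'event_id': <event.id>,
'key': <trait name>, 'value': <trait value>} (a sketch of the payload
built in the body below).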
""" session = self._engine_facade.get_session() error = None for event_model in event_models: event = None try: with session.begin(): event_type = self._get_or_create_event_type( event_model.event_type, session=session) event = models.Event(event_model.message_id, event_type, event_model.generated, event_model.raw) session.add(event) session.flush() if event_model.traits: trait_map = {} for trait in event_model.traits: if trait_map.get(trait.dtype) is None: trait_map[trait.dtype] = [] trait_map[trait.dtype].append( {'event_id': event.id, 'key': trait.name, 'value': trait.value}) for dtype in trait_map.keys(): model = TRAIT_ID_TO_MODEL[dtype] session.execute(model.__table__.insert(), trait_map[dtype]) except dbexc.DBDuplicateEntry as e: LOG.info(_LI("Duplicate event detected, skipping it: %s") % e) except KeyError as e: LOG.exception(_LE('Failed to record event: %s') % e) except Exception as e: LOG.exception(_LE('Failed to record event: %s') % e) error = e if error: raise error def get_events(self, event_filter, limit=None): """Return an iterable of model.Event objects. :param event_filter: EventFilter instance """ if limit == 0: return session = self._engine_facade.get_session() with session.begin(): # Build up the join conditions event_join_conditions = [models.EventType.id == models.Event.event_type_id] if event_filter.event_type: event_join_conditions.append(models.EventType.desc == event_filter.event_type) # Build up the where conditions event_filter_conditions = [] if event_filter.message_id: event_filter_conditions.append( models.Event.message_id == event_filter.message_id) if event_filter.start_timestamp: event_filter_conditions.append( models.Event.generated >= event_filter.start_timestamp) if event_filter.end_timestamp: event_filter_conditions.append( models.Event.generated <= event_filter.end_timestamp) trait_subq = None # Build trait filter if event_filter.traits_filter: filters = list(event_filter.traits_filter) trait_filter = filters.pop() key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] trait_subq = _build_trait_query(session, trait_type, key, value, op) for trait_filter in filters: key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] q = _build_trait_query(session, trait_type, key, value, op) trait_subq = trait_subq.filter( trait_subq.subquery().c.ev_id == q.subquery().c.ev_id) trait_subq = trait_subq.subquery() query = (session.query(models.Event.id) .join(models.EventType, sa.and_(*event_join_conditions))) if trait_subq is not None: query = query.join(trait_subq, trait_subq.c.ev_id == models.Event.id) if event_filter.admin_proj: no_proj_q = session.query(models.TraitText.event_id).filter( models.TraitText.key == 'project_id') admin_q = (session.query(models.TraitText.event_id).filter( ~sa.exists().where(models.TraitText.event_id == no_proj_q.subquery().c.event_id)).union( session.query(models.TraitText.event_id).filter(sa.and_( models.TraitText.key == 'project_id', models.TraitText.value == event_filter.admin_proj, models.Event.id == models.TraitText.event_id)))) query = query.filter(sa.exists().where( models.Event.id == admin_q.subquery().c.trait_text_event_id)) if event_filter_conditions: query = query.filter(sa.and_(*event_filter_conditions)) query = query.order_by(models.Event.generated).limit(limit) event_list = {} # get a list of all events that match filters for (id_, generated, message_id, desc, raw) in query.add_columns( 
models.Event.generated, models.Event.message_id, models.EventType.desc, models.Event.raw).all(): event_list[id_] = api_models.Event(message_id, desc, generated, [], raw) # Query all traits related to events. # NOTE (gordc): cast is done because pgsql defaults to TEXT when # handling unknown values such as null. trait_q = ( session.query( models.TraitDatetime.event_id, models.TraitDatetime.key, models.TraitDatetime.value, sa.cast(sa.null(), sa.Integer), sa.cast(sa.null(), sa.Float(53)), sa.cast(sa.null(), sa.String(255))) .filter(sa.exists().where( models.TraitDatetime.event_id == query.subquery().c.id)) ).union_all( session.query( models.TraitInt.event_id, models.TraitInt.key, sa.null(), models.TraitInt.value, sa.null(), sa.null()) .filter(sa.exists().where( models.TraitInt.event_id == query.subquery().c.id)), session.query( models.TraitFloat.event_id, models.TraitFloat.key, sa.null(), sa.null(), models.TraitFloat.value, sa.null()) .filter(sa.exists().where( models.TraitFloat.event_id == query.subquery().c.id)), session.query( models.TraitText.event_id, models.TraitText.key, sa.null(), sa.null(), sa.null(), models.TraitText.value) .filter(sa.exists().where( models.TraitText.event_id == query.subquery().c.id))) for id_, key, t_date, t_int, t_float, t_text in ( trait_q.order_by(models.TraitDatetime.key)).all(): if t_int is not None: dtype = api_models.Trait.INT_TYPE val = t_int elif t_float is not None: dtype = api_models.Trait.FLOAT_TYPE val = t_float elif t_date is not None: dtype = api_models.Trait.DATETIME_TYPE val = t_date else: dtype = api_models.Trait.TEXT_TYPE val = t_text try: trait_model = api_models.Trait(key, dtype, val) event_list[id_].append_trait(trait_model) except KeyError: # NOTE(gordc): this is expected as we do not set REPEATABLE # READ (bug 1506717). if query is run while recording new # event data, trait query may return more data than event # query. they can be safely discarded. pass return event_list.values() def get_event_types(self): """Return all event types as an iterable of strings.""" session = self._engine_facade.get_session() with session.begin(): query = (session.query(models.EventType.desc). order_by(models.EventType.desc)) for name in query.all(): # The query returns a tuple with one element. yield name[0] def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .distinct()) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for row in query.all(): yield {'name': row[0], 'data_type': dtype} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. 
:param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key, trait_model.value) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .order_by(trait_model.key)) if trait_type: query = query.filter(trait_model.key == trait_type) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for k, v in query.all(): yield api_models.Trait(name=k, dtype=dtype, value=v) def clear_expired_event_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ session = self._engine_facade.get_session() with session.begin(): end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) event_q = (session.query(models.Event.id) .filter(models.Event.generated < end)) event_subq = event_q.subquery() for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: (session.query(trait_model) .filter(trait_model.event_id.in_(event_subq)) .delete(synchronize_session="fetch")) event_rows = event_q.delete() # remove EventType and TraitType with no corresponding # matching events and traits (session.query(models.EventType) .filter(~models.EventType.events.any()) .delete(synchronize_session="fetch")) LOG.info(_LI("%d events are removed from database"), event_rows) ceilometer-6.1.5/ceilometer/event/storage/__init__.py0000664000567000056710000000000013072744703024037 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/event/storage/models.py0000664000567000056710000001015613072744703023600 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the events storage API. """ from oslo_utils import timeutils import six from ceilometer.storage import base def serialize_dt(value): """Serializes parameter if it is datetime.""" return value.isoformat() if hasattr(value, 'isoformat') else value class Event(base.Model): """A raw event from the source system. Events have Traits. Metrics will be derived from one or more Events. """ DUPLICATE = 1 UNKNOWN_PROBLEM = 2 INCOMPATIBLE_TRAIT = 3 def __init__(self, message_id, event_type, generated, traits, raw): """Create a new event. :param message_id: Unique ID for the message this event stemmed from. This is different than the Event ID, which comes from the underlying storage system. :param event_type: The type of the event. :param generated: UTC time for when the event occurred. :param traits: list of Traits on this Event. :param raw: Unindexed raw notification details. 
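A minimal construction sketch (identifier values illustrative)::

    Event(message_id='9f4e...',
          event_type='compute.instance.create.end',
          generated=timeutils.utcnow(), traits=[], raw={})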
""" base.Model.__init__(self, message_id=message_id, event_type=event_type, generated=generated, traits=traits, raw=raw) def append_trait(self, trait_model): self.traits.append(trait_model) def __repr__(self): trait_list = [] if self.traits: trait_list = [six.text_type(trait) for trait in self.traits] return ("" % (self.message_id, self.event_type, self.generated, " ".join(trait_list))) def serialize(self): return {'message_id': self.message_id, 'event_type': self.event_type, 'generated': serialize_dt(self.generated), 'traits': [trait.serialize() for trait in self.traits], 'raw': self.raw} class Trait(base.Model): """A Trait is a key/value pair of data on an Event. The value is variant record of basic data types (int, date, float, etc). """ NONE_TYPE = 0 TEXT_TYPE = 1 INT_TYPE = 2 FLOAT_TYPE = 3 DATETIME_TYPE = 4 type_names = { NONE_TYPE: "none", TEXT_TYPE: "string", INT_TYPE: "integer", FLOAT_TYPE: "float", DATETIME_TYPE: "datetime" } def __init__(self, name, dtype, value): if not dtype: dtype = Trait.NONE_TYPE base.Model.__init__(self, name=name, dtype=dtype, value=value) def __repr__(self): return "" % (self.name, self.dtype, self.value) def serialize(self): return self.name, self.dtype, serialize_dt(self.value) def get_type_name(self): return self.get_name_by_type(self.dtype) @classmethod def get_type_by_name(cls, type_name): return getattr(cls, '%s_TYPE' % type_name.upper(), None) @classmethod def get_type_names(cls): return cls.type_names.values() @classmethod def get_name_by_type(cls, type_id): return cls.type_names.get(type_id, "none") @classmethod def convert_value(cls, trait_type, value): if trait_type is cls.INT_TYPE: return int(value) if trait_type is cls.FLOAT_TYPE: return float(value) if trait_type is cls.DATETIME_TYPE: return timeutils.normalize_time(timeutils.parse_isotime(value)) # Cropping the text value to match the TraitText value size if isinstance(value, six.binary_type): return value.decode('utf-8')[:255] return six.text_type(value)[:255] ceilometer-6.1.5/ceilometer/event/__init__.py0000664000567000056710000000000013072744703022373 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/telemetry/0000775000567000056710000000000013072745164021167 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/telemetry/notifications.py0000664000567000056710000000437213072744706024421 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('ceilometer_control_exchange', default='ceilometer', help="Exchange name for ceilometer notifications."), ] cfg.CONF.register_opts(OPTS) class TelemetryBase(plugin_base.NotificationBase): """Convert telemetry notification into Samples.""" def get_targets(self, conf): """Return a sequence of oslo_messaging.Target Sequence defining the exchange and topics to be connected for this plugin. 
""" return [oslo_messaging.Target( topic=topic, exchange=conf.ceilometer_control_exchange) for topic in self.get_notification_topics(conf)] class TelemetryIpc(TelemetryBase): """Handle sample from notification bus Telemetry samples can be posted via API or polled by Polling agent. """ event_types = ['telemetry.api', 'telemetry.polling'] def process_notification(self, message): samples = message['payload']['samples'] for sample_dict in samples: yield sample.Sample( name=sample_dict['counter_name'], type=sample_dict['counter_type'], unit=sample_dict['counter_unit'], volume=sample_dict['counter_volume'], user_id=sample_dict['user_id'], project_id=sample_dict['project_id'], resource_id=sample_dict['resource_id'], timestamp=sample_dict['timestamp'], resource_metadata=sample_dict['resource_metadata'], source=sample_dict['source'], id=sample_dict['message_id']) ceilometer-6.1.5/ceilometer/telemetry/__init__.py0000664000567000056710000000000013072744703023264 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/keystone_client.py0000664000567000056710000001476513072744706022744 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystoneauth1 import exceptions as ka_exception from keystoneauth1 import identity as ka_identity from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg from oslo_log import log LOG = log.getLogger(__name__) CFG_GROUP = "service_credentials" def get_session(requests_session=None): """Get a ceilometer service credentials auth session.""" auth_plugin = ka_loading.load_auth_from_conf_options(cfg.CONF, CFG_GROUP) session = ka_loading.load_session_from_conf_options( cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session ) return session def get_client(trust_id=None, requests_session=None): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = get_session(requests_session=requests_session) return ks_client_v3.Client(session=session, trust_id=trust_id) def get_service_catalog(client): return client.session.auth.get_access(client.session).service_catalog def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token def get_client_on_behalf_user(auth_plugin, trust_id=None, requests_session=None): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = ka_loading.load_session_from_conf_options( cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session ) return ks_client_v3.Client(session=session, trust_id=trust_id) def create_trust_id(trustor_user_id, trustor_project_id, roles, auth_plugin): """Create a new trust using the ceilometer service user.""" admin_client = get_client() trustee_user_id = admin_client.auth_ref.user_id client = get_client_on_behalf_user(auth_plugin=auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles) 
return trust.id def delete_trust_id(trust_id, auth_plugin): """Delete a trust previously setup for the ceilometer user.""" client = get_client_on_behalf_user(auth_plugin=auth_plugin) try: client.trusts.delete(trust_id) except ka_exception.NotFound: pass CLI_OPTS = [ cfg.StrOpt('region-name', deprecated_group="DEFAULT", deprecated_name="os-region-name", default=os.environ.get('OS_REGION_NAME'), help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] cfg.CONF.register_cli_opts(CLI_OPTS, group=CFG_GROUP) def register_keystoneauth_opts(conf): ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) conf.set_default("auth_type", default="password-ceilometer-legacy", group=CFG_GROUP) def setup_keystoneauth(conf): if conf[CFG_GROUP].auth_type == "password-ceilometer-legacy": LOG.warning("Value 'password-ceilometer-legacy' for '[%s]/auth_type' " "is deprecated. And will be removed in Ceilometer 7.0. " "Use 'password' instead.", CFG_GROUP) ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) class LegacyCeilometerKeystoneLoader(ka_loading.BaseLoader): @property def plugin_class(self): return ka_identity.V2Password def get_options(self): options = super(LegacyCeilometerKeystoneLoader, self).get_options() options.extend([ ka_loading.Opt( 'os-username', default=os.environ.get('OS_USERNAME', 'ceilometer'), help='User name to use for OpenStack service access.'), ka_loading.Opt( 'os-password', secret=True, default=os.environ.get('OS_PASSWORD', 'admin'), help='Password to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-id', default=os.environ.get('OS_TENANT_ID', ''), help='Tenant ID to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-name', default=os.environ.get('OS_TENANT_NAME', 'admin'), help='Tenant name to use for OpenStack service access.'), ka_loading.Opt( 'os-auth-url', default=os.environ.get('OS_AUTH_URL', 'http://localhost:5000/v2.0'), help='Auth URL to use for OpenStack service access.'), ]) return options def load_from_options(self, **kwargs): options_map = { 'os_auth_url': 'auth_url', 'os_username': 'username', 'os_password': 'password', 'os_tenant_name': 'tenant_name', 'os_tenant_id': 'tenant_id', } identity_kwargs = dict((options_map[o.dest], kwargs.get(o.dest) or o.default) for o in self.get_options() if o.dest in options_map) return self.plugin_class(**identity_kwargs) ceilometer-6.1.5/ceilometer/coordination.py0000664000567000056710000001672113072744705022226 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from oslo_log import log import retrying import tooz.coordination from ceilometer.i18n import _LE, _LI, _LW from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('backend_url', help='The backend URL to use for distributed coordination. If ' 'left empty, per-deployment central agent and per-host ' 'compute agent won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of that service is running.'), cfg.FloatOpt('heartbeat', default=1.0, help='Number of seconds between heartbeats for distributed ' 'coordination.'), cfg.FloatOpt('check_watchers', default=10.0, help='Number of seconds between checks to see if group ' 'membership has changed') ] cfg.CONF.register_opts(OPTS, group='coordination') class MemberNotInGroupError(Exception): def __init__(self, group_id, members, my_id): super(MemberNotInGroupError, self).__init__(_LE( 'Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: ' 'Current agent is not part of group and cannot take tasks') % {'group_id': group_id, 'members': members, 'me': my_id}) def retry_on_member_not_in_group(exception): return isinstance(exception, MemberNotInGroupError) class PartitionCoordinator(object): """Workload partitioning coordinator. This class uses the `tooz` library to manage group membership. To ensure that the other agents know this agent is still alive, the `heartbeat` method should be called periodically. Coordination errors and reconnects are handled under the hood, so the service using the partition coordinator need not care whether the coordination backend is down. The `extract_my_subset` will simply return an empty iterable in this case. 
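A typical lifecycle looks like this (a sketch; the group name is
illustrative)::

    coordinator = PartitionCoordinator()
    coordinator.start()
    coordinator.join_group('central-global')
    mine = coordinator.extract_my_subset('central-global', resources)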
""" def __init__(self, my_id=None): self._coordinator = None self._groups = set() self._my_id = my_id or str(uuid.uuid4()) def start(self): backend_url = cfg.CONF.coordination.backend_url if backend_url: try: self._coordinator = tooz.coordination.get_coordinator( backend_url, self._my_id) self._coordinator.start() LOG.info(_LI('Coordination backend started successfully.')) except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) def stop(self): if not self._coordinator: return for group in list(self._groups): self.leave_group(group) try: self._coordinator.stop() except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) finally: self._coordinator = None def is_active(self): return self._coordinator is not None def heartbeat(self): if self._coordinator: if not self._coordinator.is_started: # re-connect self.start() try: self._coordinator.heartbeat() except tooz.coordination.ToozError: LOG.exception(_LE('Error sending a heartbeat to coordination ' 'backend.')) def watch_group(self, namespace, callback): if self._coordinator: self._coordinator.watch_join_group(namespace, callback) self._coordinator.watch_leave_group(namespace, callback) def run_watchers(self): if self._coordinator: self._coordinator.run_watchers() def join_group(self, group_id): if (not self._coordinator or not self._coordinator.is_started or not group_id): return while True: try: join_req = self._coordinator.join_group(group_id) join_req.get() LOG.info(_LI('Joined partitioning group %s'), group_id) break except tooz.coordination.MemberAlreadyExist: return except tooz.coordination.GroupNotCreated: create_grp_req = self._coordinator.create_group(group_id) try: create_grp_req.get() except tooz.coordination.GroupAlreadyExist: pass except tooz.coordination.ToozError: LOG.exception(_LE('Error joining partitioning group %s,' ' re-trying'), group_id) self._groups.add(group_id) def leave_group(self, group_id): if group_id not in self._groups: return if self._coordinator: self._coordinator.leave_group(group_id) self._groups.remove(group_id) LOG.info(_LI('Left partitioning group %s'), group_id) def _get_members(self, group_id): if not self._coordinator: return [self._my_id] while True: get_members_req = self._coordinator.get_members(group_id) try: return get_members_req.get() except tooz.coordination.GroupNotCreated: self.join_group(group_id) @retrying.retry(stop_max_attempt_number=5, wait_random_max=2000, retry_on_exception=retry_on_member_not_in_group) def extract_my_subset(self, group_id, iterable, attempt=0): """Filters an iterable, returning only objects assigned to this agent. We have a list of objects and get a list of active group members from `tooz`. We then hash all the objects into buckets and return only the ones that hashed into *our* bucket. """ if not group_id: return iterable if group_id not in self._groups: self.join_group(group_id) try: members = self._get_members(group_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) if self._my_id not in members: LOG.warning(_LW('Cannot extract tasks because agent failed to ' 'join group properly. 
Rejoining group.')) self.join_group(group_id) members = self._get_members(group_id) if self._my_id not in members: raise MemberNotInGroupError(group_id, members, self._my_id) hr = utils.HashRing(members) filtered = [v for v in iterable if hr.get_node(str(v)) == self._my_id] LOG.debug('My subset: %s', [str(f) for f in filtered]) return filtered except tooz.coordination.ToozError: LOG.exception(_LE('Error getting group membership info from ' 'coordination backend.')) return [] ceilometer-6.1.5/ceilometer/declarative.py0000664000567000056710000001451713072744705022022 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from jsonpath_rw_ext import parser from oslo_config import cfg from oslo_log import log import six import yaml from ceilometer.i18n import _, _LI LOG = log.getLogger(__name__) class DefinitionException(Exception): def __init__(self, message, definition_cfg): super(DefinitionException, self).__init__(message) self.definition_cfg = definition_cfg class Definition(object): JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() GETTERS_CACHE = {} def __init__(self, name, cfg, plugin_manager): self.cfg = cfg self.name = name self.plugin = None if isinstance(cfg, dict): if 'fields' not in cfg: raise DefinitionException( _("The field 'fields' is required for %s") % name, self.cfg) if 'plugin' in cfg: plugin_cfg = cfg['plugin'] if isinstance(plugin_cfg, six.string_types): plugin_name = plugin_cfg plugin_params = {} else: try: plugin_name = plugin_cfg['name'] except KeyError: raise DefinitionException( _('Plugin specified, but no plugin name supplied ' 'for %s') % name, self.cfg) plugin_params = plugin_cfg.get('parameters') if plugin_params is None: plugin_params = {} try: plugin_ext = plugin_manager[plugin_name] except KeyError: raise DefinitionException( _('No plugin named %(plugin)s available for ' '%(name)s') % dict( plugin=plugin_name, name=name), self.cfg) plugin_class = plugin_ext.plugin self.plugin = plugin_class(**plugin_params) fields = cfg['fields'] else: # Simple definition "foobar: jsonpath" fields = cfg if isinstance(fields, list): # NOTE(mdragon): if not a string, we assume a list. 
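# e.g. (illustrative) fields == ['payload.id', 'payload.uuid'] collapses
# to the single jsonpath '(payload.id)|(payload.uuid)'.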
if len(fields) == 1: fields = fields[0] else: fields = '|'.join('(%s)' % path for path in fields) if isinstance(fields, six.integer_types): self.getter = fields else: try: self.getter = self.make_getter(fields) except Exception as e: raise DefinitionException( _("Parse error in JSONPath specification " "'%(jsonpath)s' for %(name)s: %(err)s") % dict(jsonpath=fields, name=name, err=e), self.cfg) def _get_path(self, match): if match.context is not None: for path_element in self._get_path(match.context): yield path_element yield str(match.path) def parse(self, obj, return_all_values=False): if callable(self.getter): values = self.getter(obj) else: return self.getter values = [match for match in values if return_all_values or match.value is not None] if self.plugin is not None: if return_all_values and not self.plugin.support_return_all_values: raise DefinitionException("Plugin %s does not allow " "returning multiple values" % self.cfg["plugin"]["name"], self.cfg) values_map = [('.'.join(self._get_path(match)), match.value) for match in values] values = [v for v in self.plugin.trait_values(values_map) if v is not None] else: values = [match.value for match in values if match is not None] if return_all_values: return values else: return values[0] if values else None def make_getter(self, fields): if fields in self.GETTERS_CACHE: return self.GETTERS_CACHE[fields] else: getter = self.JSONPATH_RW_PARSER.parse(fields).find self.GETTERS_CACHE[fields] = getter return getter def load_definitions(defaults, config_file, fallback_file=None): """Set up definitions from a yaml config file.""" if not os.path.exists(config_file): config_file = cfg.CONF.find_file(config_file) if not config_file and fallback_file is not None: LOG.debug("No Definitions configuration file found! " "Using default config.") config_file = fallback_file if config_file is not None: LOG.debug("Loading definitions configuration file: %s", config_file) with open(config_file) as cf: config = cf.read() try: definition_cfg = yaml.safe_load(config) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = (_("Invalid YAML syntax in Definitions file " "%(file)s at line: %(line)s, column: %(column)s.") % dict(file=config_file, line=mark.line + 1, column=mark.column + 1)) else: errmsg = (_("YAML error reading Definitions file " "%(file)s") % dict(file=config_file)) LOG.error(errmsg) raise else: LOG.debug("No Definitions configuration file found! " "Using default config.") definition_cfg = defaults LOG.info(_LI("Definitions: %s"), definition_cfg) return definition_cfg ceilometer-6.1.5/ceilometer/i18n.py0000664000567000056710000000252613072744703020311 0ustar jenkinsjenkins00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html """ import oslo_i18n DOMAIN = 'ceilometer' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ceilometer-6.1.5/ceilometer/conf/0000775000567000056710000000000013072745164020102 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/conf/defaults.py0000664000567000056710000000335013072744705022264 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_middleware import cors def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-Openstack-Request-Id'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-Openstack-Request-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ceilometer-6.1.5/ceilometer/conf/__init__.py0000664000567000056710000000000013072744703022177 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/utils.py0000664000567000056710000002202613072744706020672 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities and helper functions.""" import bisect import calendar import copy import datetime import decimal import fnmatch import hashlib import re import struct import sys from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import units import six OPTS = [ cfg.StrOpt('rootwrap_config', default="/etc/ceilometer/rootwrap.conf", help='Path to the rootwrap configuration file to' 'use for running commands as root'), ] CONF = cfg.CONF CONF.register_opts(OPTS) EPOCH_TIME = datetime.datetime(1970, 1, 1) def _get_root_helper(): return 'sudo ceilometer-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.execute(*cmd, **kwargs) def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(six.iteritems(input)): temp[decode_unicode(key)] = decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. return [decode_unicode(element) for element in input] elif six.PY2 and isinstance(input, six.text_type): return input.encode('utf-8') elif six.PY3 and isinstance(input, six.binary_type): return input.decode('utf-8') else: return input def recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(six.iteritems(d)): if isinstance(value, dict): for subname, subvalue in recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, decode_unicode(value) else: yield name, value def restore_nesting(d, separator=':'): """Unwinds a flattened dict to restore nesting.""" d = copy.copy(d) if any([separator in k for k in d.keys()]) else d for k, v in d.copy().items(): if separator in k: top, rem = k.split(separator, 1) nest = d[top] if isinstance(d.get(top), dict) else {} nest[rem] = v d[top] = restore_nesting(nest, separator) del d[k] return d def dt_to_decimal(utc): """Datetime to Decimal. Some databases don't store microseconds in datetime so we always store as Decimal unixtime. 
""" if utc is None: return None decimal.getcontext().prec = 30 return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + (decimal.Decimal(str(utc.microsecond)) / decimal.Decimal("1000000.0"))) def decimal_to_dt(dec): """Return a datetime from Decimal unixtime format.""" if dec is None: return None integer = int(dec) micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(units.M) daittyme = datetime.datetime.utcfromtimestamp(integer) return daittyme.replace(microsecond=int(round(micro))) def sanitize_timestamp(timestamp): """Return a naive utc datetime object.""" if not timestamp: return timestamp if not isinstance(timestamp, datetime.datetime): timestamp = timeutils.parse_isotime(timestamp) return timeutils.normalize_time(timestamp) def stringify_timestamps(data): """Stringify any datetimes in given dict.""" isa_timestamp = lambda v: isinstance(v, datetime.datetime) return dict((k, v.isoformat() if isa_timestamp(v) else v) for (k, v) in six.iteritems(data)) def dict_to_keyval(value, key_base=None): """Expand a given dict to its corresponding key-value pairs. Generated keys are fully qualified, delimited using dot notation. ie. key = 'key.child_key.grandchild_key[0]' """ val_iter, key_func = None, None if isinstance(value, dict): val_iter = six.iteritems(value) key_func = lambda k: key_base + '.' + k if key_base else k elif isinstance(value, (tuple, list)): val_iter = enumerate(value) key_func = lambda k: key_base + '[%d]' % k if val_iter: for k, v in val_iter: key_gen = key_func(k) if isinstance(v, dict) or isinstance(v, (tuple, list)): for key_gen, v in dict_to_keyval(v, key_gen): yield key_gen, v else: yield key_gen, v def lowercase_keys(mapping): """Converts the values of the keys in mapping to lowercase.""" items = mapping.items() for key, value in items: del mapping[key] mapping[key.lower()] = value def lowercase_values(mapping): """Converts the values in the mapping dict to lowercase.""" items = mapping.items() for key, value in items: mapping[key] = value.lower() def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. Updates occur without replacing entire sub-dicts. 
""" dict_to_update = copy.deepcopy(original_dict) for key, value in six.iteritems(updates): if isinstance(value, dict): sub_dict = update_nested(dict_to_update.get(key, {}), value) dict_to_update[key] = sub_dict else: dict_to_update[key] = updates[key] return dict_to_update def uniq(dupes, attrs): """Exclude elements of dupes with a duplicated set of attribute values.""" key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs]) keys = [] deduped = [] for d in dupes: if key(d) not in keys: deduped.append(d) keys.append(key(d)) return deduped def hash_of_set(s): return str(hash(frozenset(s))) class HashRing(object): def __init__(self, nodes, replicas=100): self._ring = dict() self._sorted_keys = [] for node in nodes: for r in six.moves.range(replicas): hashed_key = self._hash('%s-%s' % (node, r)) self._ring[hashed_key] = node self._sorted_keys.append(hashed_key) self._sorted_keys.sort() @staticmethod def _hash(key): return struct.unpack_from('>I', hashlib.md5(str(key).encode()).digest())[0] def _get_position_on_ring(self, key): hashed_key = self._hash(key) position = bisect.bisect(self._sorted_keys, hashed_key) return position if position < len(self._sorted_keys) else 0 def get_node(self, key): if not self._ring: return None pos = self._get_position_on_ring(key) return self._ring[self._sorted_keys[pos]] def kill_listeners(listeners): # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), # which stops new messages, and wait(), which processes remaining # messages and closes connection for listener in listeners: listener.stop() listener.wait() if sys.version_info > (2, 7, 9): match = fnmatch.fnmatch else: _MATCH_CACHE = {} _MATCH_CACHE_MAX = 100 def match(string, pattern): """Thread safe fnmatch re-implementation. Standard library fnmatch in Python versions <= 2.7.9 has thread safe issue, this helper function is created for such case. see: https://bugs.python.org/issue23191 """ string = string.lower() pattern = pattern.lower() cached_pattern = _MATCH_CACHE.get(pattern) if cached_pattern is None: translated_pattern = fnmatch.translate(pattern) cached_pattern = re.compile(translated_pattern) if len(_MATCH_CACHE) >= _MATCH_CACHE_MAX: _MATCH_CACHE.clear() _MATCH_CACHE[pattern] = cached_pattern return cached_pattern.match(string) is not None ceilometer-6.1.5/ceilometer/nova_client.py0000664000567000056710000001271513072744706022037 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import logging import novaclient from novaclient import client as nova_client from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client OPTS = [ cfg.BoolOpt('nova_http_log_debug', default=False, # Added in Mikita deprecated_for_removal=True, help=('Allow novaclient\'s debug log output. 
' '(Use default_log_levels instead)')), ] SERVICE_OPTS = [ cfg.StrOpt('nova', default='compute', help='Nova service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.import_opt('http_timeout', 'ceilometer.service') cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: LOG.exception(e) raise return with_logging class Client(object): """A client which gets information via python-novaclient.""" def __init__(self, endpoint_override=None, auth=None): """Initialize a nova client object.""" conf = cfg.CONF.service_credentials logger = None if cfg.CONF.nova_http_log_debug: logger = logging.getLogger("novaclient-debug") logger.setLevel(log.DEBUG) self.nova_client = nova_client.Client( version=2, session=keystone_client.get_session(), # nova adapter options region_name=conf.region_name, interface=conf.interface, service_type=cfg.CONF.service_types.nova, # keystone adapter options endpoint_override=endpoint_override, auth=auth, logger=logger) def _with_flavor_and_image(self, instances): flavor_cache = {} image_cache = {} for instance in instances: self._with_flavor(instance, flavor_cache) self._with_image(instance, image_cache) return instances def _with_flavor(self, instance, cache): fid = instance.flavor['id'] if fid in cache: flavor = cache.get(fid) else: try: flavor = self.nova_client.flavors.get(fid) except novaclient.exceptions.NotFound: flavor = None cache[fid] = flavor attr_defaults = [('name', 'unknown-id-%s' % fid), ('vcpus', 0), ('ram', 0), ('disk', 0), ('ephemeral', 0)] for attr, default in attr_defaults: if not flavor: instance.flavor[attr] = default continue instance.flavor[attr] = getattr(flavor, attr, default) def _with_image(self, instance, cache): try: iid = instance.image['id'] except TypeError: instance.image = None instance.kernel_id = None instance.ramdisk_id = None return if iid in cache: image = cache.get(iid) else: try: image = self.nova_client.images.get(iid) except novaclient.exceptions.NotFound: image = None cache[iid] = image attr_defaults = [('kernel_id', None), ('ramdisk_id', None)] instance.image['name'] = ( getattr(image, 'name') if image else 'unknown-id-%s' % iid) image_metadata = getattr(image, 'metadata', None) for attr, default in attr_defaults: ameta = image_metadata.get(attr) if image_metadata else default setattr(instance, attr, ameta) @logged def instance_get_all_by_host(self, hostname, since=None): """Returns a list of instances on a particular host. If since is supplied, it will return the instances changed since that datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'host': hostname, 'all_tenants': True} if since: search_opts['changes-since'] = since return self._with_flavor_and_image(self.nova_client.servers.list( detailed=True, search_opts=search_opts)) @logged def instance_get_all(self, since=None): """Returns a list of all instances. If since is supplied, it will return the instances changed since that datetime.
since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'all_tenants': True} if since: search_opts['changes-since'] = since return self.nova_client.servers.list( detailed=True, search_opts=search_opts) @logged def floating_ip_get_all(self): """Returns all floating ips.""" return self.nova_client.floating_ips.list() ceilometer-6.1.5/ceilometer/storage/0000775000567000056710000000000013072745164020621 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/impl_db2.py0000664000567000056710000004302213072744706022665 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """DB2 storage backend """ from __future__ import division import copy import datetime import itertools import sys import bson.code import bson.objectid from oslo_config import cfg from oslo_utils import timeutils import pymongo import six import ceilometer from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.storage import pymongo_base from ceilometer import utils AVAILABLE_CAPABILITIES = { 'resources': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True}} } class Connection(pymongo_base.Connection): """The db2 storage for Ceilometer Collections:: - meter - the raw incoming data - resource - the metadata for resources - { _id: uuid of resource, metadata: metadata dictionaries user_id: uuid project_id: uuid meter: [ array of {counter_name: string, counter_type: string, counter_unit: string} ] } """ CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) CONNECTION_POOL = pymongo_utils.ConnectionPool() GROUP = {'_id': '$counter_name', 'unit': {'$min': '$counter_unit'}, 'min': {'$min': '$counter_volume'}, 'max': {'$max': '$counter_volume'}, 'sum': {'$sum': '$counter_volume'}, 'count': {'$sum': 1}, 'duration_start': {'$min': '$timestamp'}, 'duration_end': {'$max': '$timestamp'}, } PROJECT = {'_id': 0, 'unit': 1, 'min': 1, 'max': 1, 'sum': 1, 'count': 1, 'avg': {'$divide': ['$sum', '$count']}, 'duration_start': 1, 'duration_end': 1, } SORT_OPERATION_MAP = {'desc': pymongo.DESCENDING, 'asc': pymongo.ASCENDING} SECONDS_IN_A_DAY = 86400 def __init__(self, url): # Since we are using pymongo, even though we are connecting to DB2, # we still have to make sure that the scheme, which is used to # distinguish the db2 driver from the mongodb driver, is replaced so # that pymongo will not produce an exception on the scheme. url = url.replace('db2:', 'mongodb:', 1) self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.2 to use aggregate(). Since we are using mongodb # as the backend for tests, the following code is necessary to make # sure that the tests won't try aggregate() on an older mongodb. 
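# Illustrative example (values assumed, added for clarity): a MongoDB
# 2.0.x server used in tests would report
# server_info()['versionArray'] == [2, 0, 6, 0], which compares less
# than [2, 2] and raises StorageBadVersion below.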
# For db2, the versionArray won't be part of the server_info, so there # will not be an exception when real db2 is used as the backend. server_info = self.conn.server_info() if server_info.get('sysInfo'): self._using_mongodb = True else: self._using_mongodb = False if self._using_mongodb and server_info.get('versionArray') < [2, 2]: raise storage.StorageBadVersion("Need at least MongoDB 2.2") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) self.upgrade() @classmethod def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): """Returns sort instructions. Sort instructions are used in the query to determine what attributes to sort on and what direction to use. :param sort_keys: array of attributes by which results should be sorted. :param sort_dir: direction in which results should be sorted (asc, desc). :return: sort parameters """ sort_keys = sort_keys or [] sort_instructions = [] _sort_dir = cls.SORT_OPERATION_MAP.get( sort_dir, cls.SORT_OPERATION_MAP['desc']) for _sort_key in sort_keys: _instruction = (_sort_key, _sort_dir) sort_instructions.append(_instruction) return sort_instructions def _generate_random_str(self, str_len): init_str = str(bson.objectid.ObjectId()) objectid_len = len(init_str) if str_len >= objectid_len: init_str = (init_str * int(str_len/objectid_len) + 'x' * int(str_len % objectid_len)) return init_str def upgrade(self, version=None): # create collection if not present if 'resource' not in self.db.conn.collection_names(): self.db.conn.create_collection('resource') if 'meter' not in self.db.conn.collection_names(): self.db.conn.create_collection('meter') # Establish indexes # # We need variations for user_id vs. project_id because of the # way the indexes are stored in b-trees. The user_id and # project_id values are usually mutually exclusive in the # queries, so the database won't take advantage of an index # including both. if self.db.resource.index_information() == {}: # Initialize a longer resource id to work around a DB2 NoSQL issue. # A longer resource id is required by the compute node's resource as # their id is '_'. DB2 creates a VARCHAR(70) # for resource id when its length < 70. But DB2 can create a # VARCHAR(n) for the resource id which has n(n>70) characters. # Users can adjust 'db2nosql_resource_id_maxlen' (default is 512) # for their environment. 
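# Worked example (added for clarity, using the default option value):
# str(bson.objectid.ObjectId()) is 24 characters long, so with
# db2nosql_resource_id_maxlen = 512, _generate_random_str() returns the
# ObjectId string repeated 512 // 24 = 21 times (504 characters), padded
# with 512 % 24 = 8 'x' characters to reach the full 512.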
resource_id = self._generate_random_str( cfg.CONF.database.db2nosql_resource_id_maxlen) self.db.resource.insert_one({'_id': resource_id, 'no_key': resource_id}) meter_id = str(bson.objectid.ObjectId()) timestamp = timeutils.utcnow() self.db.meter.insert_one({'_id': meter_id, 'no_key': meter_id, 'timestamp': timestamp}) self.db.resource.create_index([ ('user_id', pymongo.ASCENDING), ('project_id', pymongo.ASCENDING), ('source', pymongo.ASCENDING)], name='resource_idx') self.db.meter.create_index([ ('resource_id', pymongo.ASCENDING), ('user_id', pymongo.ASCENDING), ('project_id', pymongo.ASCENDING), ('counter_name', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING), ('source', pymongo.ASCENDING)], name='meter_idx') self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], name='timestamp_idx') self.db.resource.remove({'_id': resource_id}) self.db.meter.remove({'_id': meter_id}) def clear(self): # db2 does not support drop_database, remove all collections for col in ['resource', 'meter']: self.db[col].drop() # drop_database command does nothing on db2 database since this has # not been implemented. However calling this method is important for # removal of all the empty dbs created during the test runs since # test run is against mongodb on Jenkins self.conn.drop_database(self.db.name) self.conn.close() def record_metering_data(self, data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ # Record the updated resource metadata data = copy.deepcopy(data) data['resource_metadata'] = pymongo_utils.improve_keys( data.pop('resource_metadata')) self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'] or 'null', 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], 'counter_unit': data['counter_unit'], }, }, }, upsert=True, ) # Record the raw data for the meter. Use a copy so we do not # modify a data structure owned by our caller (the driver adds # a new key '_id'). record = copy.copy(data) record['recorded_at'] = timeutils.utcnow() # Make sure that the data does have field _id which db2 wont add # automatically. if record.get('_id') is None: record['_id'] = str(bson.objectid.ObjectId()) self.db.meter.insert_one(record) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. 
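A hypothetical usage sketch (the connection URL and project ID are
illustrative, not taken from this module):

    conn = Connection('db2://user:pass@host:27017/ceilometer')
    for res in conn.get_resources(project='some-project-id', limit=10):
        print(res.resource_id, res.last_sample_timestamp)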
""" if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} q = {} if user is not None: q['user_id'] = user if project is not None: q['project_id'] = project if source is not None: q['source'] = source if resource is not None: q['resource_id'] = resource # Add resource_ prefix so it matches the field in the db q.update(dict(('resource_' + k, v) for (k, v) in six.iteritems(metaquery))) if start_timestamp or end_timestamp: # Look for resources matching the above criteria and with # samples in the time range we care about, then change the # resource query to return just those resources by id. ts_range = pymongo_utils.make_timestamp_range(start_timestamp, end_timestamp, start_timestamp_op, end_timestamp_op) if ts_range: q['timestamp'] = ts_range sort_keys = base._handle_sort_key('resource', 'timestamp') sort_keys.insert(0, 'resource_id') sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, sort_dir='desc') resource = lambda x: x['resource_id'] if limit is not None: meters = self.db.meter.find(q, sort=sort_instructions, limit=limit) else: meters = self.db.meter.find(q, sort=sort_instructions) for resource_id, r_meters in itertools.groupby(meters, key=resource): # Because we have to know first/last timestamp, and we need a full # list of references to the resource's meters, we need a tuple # here. r_meters = tuple(r_meters) latest_meter = r_meters[0] last_ts = latest_meter['timestamp'] first_ts = r_meters[-1]['timestamp'] yield models.Resource(resource_id=latest_meter['resource_id'], project_id=latest_meter['project_id'], first_sample_timestamp=first_ts, last_sample_timestamp=last_ts, source=latest_meter['source'], user_id=latest_meter['user_id'], metadata=pymongo_utils.unquote_keys( latest_meter['resource_metadata'])) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instance. Items are containing meter statistics described by the query parameters. The filter must have a meter value set. 
""" if (groupby and set(groupby) - set(['user_id', 'project_id', 'resource_id', 'source'])): raise ceilometer.NotImplementedError( "Unable to group by these fields") if aggregate: raise ceilometer.NotImplementedError( 'Selectable aggregates not implemented') q = pymongo_utils.make_query_from_filter(sample_filter) if period: if sample_filter.start_timestamp: period_start = sample_filter.start_timestamp else: period_start = self.db.meter.find( limit=1, sort=[('timestamp', pymongo.ASCENDING)])[0]['timestamp'] if groupby: sort_keys = ['counter_name'] + groupby + ['timestamp'] else: sort_keys = ['counter_name', 'timestamp'] sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, sort_dir='asc') meters = self.db.meter.find(q, sort=sort_instructions) def _group_key(meter): # the method to define a key for groupby call key = {} for y in sort_keys: if y == 'timestamp' and period: key[y] = (timeutils.delta_seconds(period_start, meter[y]) // period) elif y != 'timestamp': key[y] = meter[y] return key def _to_offset(periods): return {'days': (periods * period) // self.SECONDS_IN_A_DAY, 'seconds': (periods * period) % self.SECONDS_IN_A_DAY} for key, grouped_meters in itertools.groupby(meters, key=_group_key): stat = models.Statistics(unit=None, min=sys.maxsize, max=-sys.maxsize, avg=0, sum=0, count=0, period=0, period_start=0, period_end=0, duration=0, duration_start=0, duration_end=0, groupby=None) for meter in grouped_meters: stat.unit = meter.get('counter_unit', '') m_volume = meter.get('counter_volume') if stat.min > m_volume: stat.min = m_volume if stat.max < m_volume: stat.max = m_volume stat.sum += m_volume stat.count += 1 if stat.duration_start == 0: stat.duration_start = meter['timestamp'] stat.duration_end = meter['timestamp'] if groupby and not stat.groupby: stat.groupby = {} for group_key in groupby: stat.groupby[group_key] = meter[group_key] stat.duration = timeutils.delta_seconds(stat.duration_start, stat.duration_end) stat.avg = stat.sum / stat.count if period: stat.period = period periods = key.get('timestamp') stat.period_start = (period_start + datetime. timedelta(**(_to_offset(periods)))) stat.period_end = (period_start + datetime. timedelta(**(_to_offset(periods + 1)))) else: stat.period_start = stat.duration_start stat.period_end = stat.duration_end yield stat ceilometer-6.1.5/ceilometer/storage/impl_hbase.py0000664000567000056710000004500613072744706023304 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import operator import time from oslo_log import log from oslo_utils import timeutils import ceilometer from ceilometer.storage import base from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import migration as hbase_migration from ceilometer.storage.hbase import utils as hbase_utils from ceilometer.storage import models from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'resources': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the metering data into a HBase database Collections: - meter (describes sample actually): - row-key: consists of reversed timestamp, meter and a message uuid for purposes of uniqueness - Column Families: f: contains the following qualifiers: - counter_name: - counter_type: - counter_unit: - counter_volume: - message: - message_id: - message_signature: - resource_metadata: raw metadata for corresponding resource of the meter - project_id: - resource_id: - user_id: - recorded_at: - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - rts: - timestamp: - source for meter with prefix 's' - resource: - row_key: uuid of resource - Column Families: f: contains the following qualifiers: - resource_metadata: raw metadata for corresponding resource - project_id: - resource_id: - user_id: - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - sources for all corresponding meters with prefix 's' - all meters with prefix 'm' for this resource in format: .. code-block:: python "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type, counter_unit) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None RESOURCE_TABLE = "resource" METER_TABLE = "meter" def __init__(self, url): super(Connection, self).__init__(url) def upgrade(self): tables = [self.RESOURCE_TABLE, self.METER_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) hbase_migration.migrate_tables(conn, tables) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.RESOURCE_TABLE, self.METER_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_metering_data(self, data): """Write the data to the backend storage system. 
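For example (illustrative), a 'cpu_util' sample is written twice: as a
meter row keyed by prepare_key(counter_name, reversed-timestamp,
message_id), and as an update to the resource row keyed by its
resource id, following the layout in the class docstring above.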
:param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) meter_table = conn.table(self.METER_TABLE) resource_metadata = data.get('resource_metadata', {}) # Determine the name of the new meter rts = hbase_utils.timestamp(data['timestamp']) new_meter = hbase_utils.prepare_key( rts, data['source'], data['counter_name'], data['counter_type'], data['counter_unit']) # TODO(nprivalova): try not to store resource_id resource = hbase_utils.serialize_entry(**{ 'source': data['source'], 'meter': {new_meter: data['timestamp']}, 'resource_metadata': resource_metadata, 'resource_id': data['resource_id'], 'project_id': data['project_id'], 'user_id': data['user_id']}) # Here we put the entry in HBase with our own timestamp. This is # needed when samples arrive out-of-order. # Because we use timestamp=data['timestamp'], the newest data is # automatically 'on the top'. This keeps the metadata up-to-date: # metadata from the newest samples is considered current. ts = int(time.mktime(data['timestamp'].timetuple()) * 1000) resource_table.put(hbase_utils.encode_unicode(data['resource_id']), resource, ts) # The row key consists of the reversed timestamp, the meter and a # message uuid for purposes of uniqueness row = hbase_utils.prepare_key(data['counter_name'], rts, data['message_id']) record = hbase_utils.serialize_entry( data, **{'source': data['source'], 'rts': rts, 'message': data, 'recorded_at': timeutils.utcnow()}) meter_table.put(row, record) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like ge, gt. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return q = hbase_utils.make_query(metaquery=metaquery, user_id=user, project_id=project, resource_id=resource, source=source) q = hbase_utils.make_meter_query_for_resource(start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, source, q) with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) LOG.debug("Query Resource table: %s", q) for resource_id, data in resource_table.scan(filter=q, limit=limit): f_res, meters, md = hbase_utils.deserialize_entry( data) resource_id = hbase_utils.encode_unicode(resource_id) # Unfortunately happybase doesn't preserve the ordering of # results from HBase, so we have to find the min and max # manually first_ts = min(meters, key=operator.itemgetter(1))[1] last_ts = max(meters, key=operator.itemgetter(1))[1] source = meters[0][0][1] # If we use QualifierFilter, HBase returns only the qualifiers # filtered by; it will not return the whole entry. That's why # we need to request the additional qualifiers manually. 
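# Illustrative shape (inferred from how deserialize_entry results are
# used here and in get_meters): each element of 'meters' looks like
# ((rts, source, counter_name, counter_type, counter_unit), timestamp),
# so min()/max() over item[1] recover the first and last sample
# timestamps, and meters[0][0][1] picks out a source value.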
if 'project_id' not in f_res and 'user_id' not in f_res: row = resource_table.row( resource_id, columns=['f:project_id', 'f:user_id', 'f:resource_metadata']) f_res, _m, md = hbase_utils.deserialize_entry(row) yield models.Resource( resource_id=resource_id, first_sample_timestamp=first_ts, last_sample_timestamp=last_ts, project_id=f_res['project_id'], source=source, user_id=f_res['user_id'], metadata=md) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return metaquery = metaquery or {} with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) q = hbase_utils.make_query(metaquery=metaquery, user_id=user, project_id=project, resource_id=resource, source=source) LOG.debug("Query Resource table: %s", q) gen = resource_table.scan(filter=q) # We need result set to be sure that user doesn't receive several # same meters. Please see bug # https://bugs.launchpad.net/ceilometer/+bug/1301371 result = set() for ignored, data in gen: flatten_result, meters, md = hbase_utils.deserialize_entry( data) for m in meters: if limit and len(result) >= limit: return _m_rts, m_source, name, m_type, unit = m[0] if unique: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': None, 'project_id': None, 'user_id': None, 'source': None} else: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': flatten_result['resource_id'], 'project_id': flatten_result['project_id'], 'user_id': flatten_result['user_id']} frozen_meter = frozenset(meter_dict.items()) if frozen_meter in result: continue result.add(frozen_meter) if not unique: meter_dict.update({'source': m_source if m_source else None}) yield models.Meter(**meter_dict) def get_samples(self, sample_filter, limit=None): """Return an iterable of models.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. """ if limit == 0: return with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. 
make_sample_query_from_filter (sample_filter, require_meter=False)) LOG.debug("Query Meter Table: %s", q) gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit, columns=columns) for ignored, meter in gen: d_meter = hbase_utils.deserialize_entry(meter)[0] d_meter['message']['counter_volume'] = ( float(d_meter['message']['counter_volume'])) d_meter['message']['recorded_at'] = d_meter['recorded_at'] yield models.Sample(**d_meter['message']) @staticmethod def _update_meter_stats(stat, meter): """Do the stats calculation for a single meter record. :param stat: models.Statistics object where aggregated stats are kept :param meter: meter record as returned from HBase """ vol = meter['counter_volume'] ts = meter['timestamp'] stat.unit = meter['counter_unit'] stat.min = min(vol, stat.min or vol) stat.max = max(vol, stat.max) stat.sum = vol + (stat.sum or 0) stat.count += 1 stat.avg = (stat.sum / float(stat.count)) stat.duration_start = min(ts, stat.duration_start or ts) stat.duration_end = max(ts, stat.duration_end or ts) stat.duration = (timeutils.delta_seconds(stat.duration_start, stat.duration_end)) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instances. Items contain meter statistics as described by the query parameters. The filter must have a meter value set. .. note:: Due to HBase limitations the aggregations are implemented in the driver itself, therefore this method will be quite slow because of all the Thrift traffic it is going to create. """ if groupby: raise ceilometer.NotImplementedError("Group by not implemented.") if aggregate: raise ceilometer.NotImplementedError( 'Selectable aggregates not implemented') with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. 
make_sample_query_from_filter (sample_filter)) # These fields are used in statistics' calculating columns.extend(['f:timestamp', 'f:counter_volume', 'f:counter_unit']) meters = map(hbase_utils.deserialize_entry, list(meter for (ignored, meter) in meter_table.scan( filter=q, row_start=start, row_stop=stop, columns=columns))) if sample_filter.start_timestamp: start_time = sample_filter.start_timestamp elif meters: start_time = meters[-1][0]['timestamp'] else: start_time = None if sample_filter.end_timestamp: end_time = sample_filter.end_timestamp elif meters: end_time = meters[0][0]['timestamp'] else: end_time = None results = [] if not period: period = 0 period_start = start_time period_end = end_time # As our HBase meters are stored as newest-first, we need to iterate # in the reverse order for meter in meters[::-1]: ts = meter[0]['timestamp'] if period: offset = int(timeutils.delta_seconds( start_time, ts) / period) * period period_start = start_time + datetime.timedelta(0, offset) if not results or not results[-1].period_start == period_start: if period: period_end = period_start + datetime.timedelta( 0, period) results.append( models.Statistics(unit='', count=0, min=0, max=0, avg=0, sum=0, period=period, period_start=period_start, period_end=period_end, duration=None, duration_start=None, duration_end=None, groupby=None) ) self._update_meter_stats(results[-1], meter[0]) return results ceilometer-6.1.5/ceilometer/storage/impl_mongodb.py0000664000567000056710000007027713072744706023657 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2014 Red Hat, Inc # # Authors: Doug Hellmann # Julien Danjou # Eoghan Glynn # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""MongoDB storage backend""" import copy import datetime import uuid import bson.code import bson.objectid from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pymongo import six import ceilometer from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.storage import pymongo_base from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'resources': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True, 'selectable': {'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True}}} } class Connection(pymongo_base.Connection): """Put the data into a MongoDB database Collections:: - meter - the raw incoming data - resource - the metadata for resources - { _id: uuid of resource, metadata: metadata dictionaries user_id: uuid project_id: uuid meter: [ array of {counter_name: string, counter_type: string, counter_unit: string} ] } """ CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) CONNECTION_POOL = pymongo_utils.ConnectionPool() STANDARD_AGGREGATES = dict([(a.name, a) for a in [ pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, pymongo_utils.COUNT_AGGREGATION, ]]) AGGREGATES = dict([(a.name, a) for a in [ pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, pymongo_utils.COUNT_AGGREGATION, pymongo_utils.STDDEV_AGGREGATION, pymongo_utils.CARDINALITY_AGGREGATION, ]]) SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'), 'asc': (pymongo.ASCENDING, '$gt')} MAP_RESOURCES = bson.code.Code(""" function () { emit(this.resource_id, {user_id: this.user_id, project_id: this.project_id, source: this.source, first_timestamp: this.timestamp, last_timestamp: this.timestamp, metadata: this.resource_metadata}) }""") REDUCE_RESOURCES = bson.code.Code(""" function (key, values) { var merge = {user_id: values[0].user_id, project_id: values[0].project_id, source: values[0].source, first_timestamp: values[0].first_timestamp, last_timestamp: values[0].last_timestamp, metadata: values[0].metadata} values.forEach(function(value) { if (merge.first_timestamp - value.first_timestamp > 0) { merge.first_timestamp = value.first_timestamp; merge.user_id = value.user_id; merge.project_id = value.project_id; merge.source = value.source; } else if (merge.last_timestamp - value.last_timestamp <= 0) { merge.last_timestamp = value.last_timestamp; merge.metadata = value.metadata; } }); return merge; }""") _GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1) _APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31, hour=23, minute=59, second=59) def __init__(self, url): # NOTE(jd) Use our own connection pooling on top of the Pymongo one. # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. 
self.conn = self.CONNECTION_POOL.connect(url) self.version = self.conn.server_info()['versionArray'] # Require MongoDB 2.4 to use $setOnInsert if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION: raise storage.StorageBadVersion( "Need at least MongoDB %s" % pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION) connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() @staticmethod def update_ttl(ttl, ttl_index_name, index_field, coll): """Update or create time_to_live indexes. :param ttl: time to live in seconds. :param ttl_index_name: name of the index we want to update or create. :param index_field: field with the index that we need to update. :param coll: collection which indexes need to be updated. """ indexes = coll.index_information() if ttl <= 0: if ttl_index_name in indexes: coll.drop_index(ttl_index_name) return if ttl_index_name in indexes: return coll.database.command( 'collMod', coll.name, index={'keyPattern': {index_field: pymongo.ASCENDING}, 'expireAfterSeconds': ttl}) coll.create_index([(index_field, pymongo.ASCENDING)], expireAfterSeconds=ttl, name=ttl_index_name) def upgrade(self): # Establish indexes # # We need variations for user_id vs. project_id because of the # way the indexes are stored in b-trees. The user_id and # project_id values are usually mutually exclusive in the # queries, so the database won't take advantage of an index # including both. # create collection if not present if 'resource' not in self.db.conn.collection_names(): self.db.conn.create_collection('resource') if 'meter' not in self.db.conn.collection_names(): self.db.conn.create_collection('meter') name_qualifier = dict(user_id='', project_id='project_') background = dict(user_id=False, project_id=True) for primary in ['user_id', 'project_id']: name = 'meter_%sidx' % name_qualifier[primary] self.db.meter.create_index([ ('resource_id', pymongo.ASCENDING), (primary, pymongo.ASCENDING), ('counter_name', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING), ], name=name, background=background[primary]) self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], name='timestamp_idx') # NOTE(ityaptin) This index covers get_resource requests sorting # and MongoDB uses part of this compound index for different # queries based on any of user_id, project_id, last_sample_timestamp # fields self.db.resource.create_index([('user_id', pymongo.DESCENDING), ('project_id', pymongo.DESCENDING), ('last_sample_timestamp', pymongo.DESCENDING)], name='resource_user_project_timestamp',) self.db.resource.create_index([('last_sample_timestamp', pymongo.DESCENDING)], name='last_sample_timestamp_idx') # update or create time_to_live index ttl = cfg.CONF.database.metering_time_to_live self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter) self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp', self.db.resource) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() def record_metering_data(self, data): """Write the data to the backend storage system. 
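A sketch of the expected sample dictionary (keys taken from the code
below, values purely illustrative):

    {'counter_name': 'cpu_util', 'counter_type': 'gauge',
     'counter_unit': '%', 'counter_volume': 12.5,
     'user_id': '...', 'project_id': '...', 'resource_id': '...',
     'source': 'openstack', 'timestamp': datetime.datetime.utcnow(),
     'resource_metadata': {...}}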
:param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ # Record the updated resource metadata - we use $setOnInsert to # unconditionally insert sample timestamps and resource metadata # (in the update case, this must be conditional on the sample not # being out-of-order) data = copy.deepcopy(data) data['resource_metadata'] = pymongo_utils.improve_keys( data.pop('resource_metadata')) resource = self.db.resource.find_one_and_update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], 'source': data['source'], }, '$setOnInsert': {'metadata': data['resource_metadata'], 'first_sample_timestamp': data['timestamp'], 'last_sample_timestamp': data['timestamp'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], 'counter_unit': data['counter_unit'], }, }, }, upsert=True, return_document=pymongo.ReturnDocument.AFTER, ) # only update last sample timestamp if actually later (the usual # in-order case) last_sample_timestamp = resource.get('last_sample_timestamp') if (last_sample_timestamp is None or last_sample_timestamp <= data['timestamp']): self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'metadata': data['resource_metadata'], 'last_sample_timestamp': data['timestamp']}} ) # only update first sample timestamp if actually earlier (the unusual # out-of-order case) # NOTE: a null first sample timestamp is not updated as this indicates # a pre-existing resource document dating from before we started # recording these timestamps in the resource collection first_sample_timestamp = resource.get('first_sample_timestamp') if (first_sample_timestamp is not None and first_sample_timestamp > data['timestamp']): self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'first_sample_timestamp': data['timestamp']}} ) # Record the raw data for the meter. Use a copy so we do not # modify a data structure owned by our caller (the driver adds # a new key '_id'). record = copy.copy(data) record['recorded_at'] = timeutils.utcnow() self.db.meter.insert_one(record) def clear_expired_metering_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs with native MongoDB time-to-live feature. """ LOG.debug("Clearing expired metering data is based on native " "MongoDB time to live feature and going in background.") @classmethod def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): """Returns a sort_instruction and paging operator. Sort instructions are used in the query to determine what attributes to sort on and what direction to use. :param q: The query dict passed in. :param sort_keys: array of attributes by which results be sorted. :param sort_dir: direction in which results be sorted (asc, desc). :return: sort instructions and paging operator """ sort_keys = sort_keys or [] sort_instructions = [] _sort_dir, operation = cls.SORT_OPERATION_MAPPING.get( sort_dir, cls.SORT_OPERATION_MAPPING['desc']) for _sort_key in sort_keys: _instruction = (_sort_key, _sort_dir) sort_instructions.append(_instruction) return sort_instructions, operation def _get_time_constrained_resources(self, query, start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, metaquery, resource, limit): """Return an iterable of models.Resource instances Items are constrained by sample timestamp. :param query: project/user/source query :param start_timestamp: modified timestamp start range. 
:param start_timestamp_op: start time operator, like gt, ge. :param end_timestamp: modified timestamp end range. :param end_timestamp_op: end time operator, like lt, le. :param metaquery: dict with metadata to match on. :param resource: resource filter. """ if resource is not None: query['resource_id'] = resource # Add resource_ prefix so it matches the field in the db query.update(dict(('resource_' + k, v) for (k, v) in six.iteritems(metaquery))) # FIXME(dhellmann): This may not perform very well, # but doing any better will require changing the database # schema and that will need more thought than I have time # to put into it today. # Look for resources matching the above criteria and with # samples in the time range we care about, then change the # resource query to return just those resources by id. ts_range = pymongo_utils.make_timestamp_range(start_timestamp, end_timestamp, start_timestamp_op, end_timestamp_op) if ts_range: query['timestamp'] = ts_range sort_keys = base._handle_sort_key('resource') sort_instructions = self._build_sort_instructions(sort_keys)[0] # use a unique collection name for the results collection, # as result post-sorting (as oppposed to reduce pre-sorting) # is not possible on an inline M-R out = 'resource_list_%s' % uuid.uuid4() self.db.meter.map_reduce(self.MAP_RESOURCES, self.REDUCE_RESOURCES, out=out, sort={'resource_id': 1}, query=query) try: if limit is not None: results = self.db[out].find(sort=sort_instructions, limit=limit) else: results = self.db[out].find(sort=sort_instructions) for r in results: resource = r['value'] yield models.Resource( resource_id=r['_id'], user_id=resource['user_id'], project_id=resource['project_id'], first_sample_timestamp=resource['first_timestamp'], last_sample_timestamp=resource['last_timestamp'], source=resource['source'], metadata=pymongo_utils.unquote_keys(resource['metadata'])) finally: self.db[out].drop() def _get_floating_resources(self, query, metaquery, resource, limit): """Return an iterable of models.Resource instances Items are unconstrained by timestamp. :param query: project/user/source query :param metaquery: dict with metadata to match on. :param resource: resource filter. """ if resource is not None: query['_id'] = resource query.update(dict((k, v) for (k, v) in six.iteritems(metaquery))) keys = base._handle_sort_key('resource') sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i for i in keys] sort_instructions = self._build_sort_instructions(sort_keys)[0] if limit is not None: results = self.db.resource.find(query, sort=sort_instructions, limit=limit) else: results = self.db.resource.find(query, sort=sort_instructions) for r in results: yield models.Resource( resource_id=r['_id'], user_id=r['user_id'], project_id=r['project_id'], first_sample_timestamp=r.get('first_sample_timestamp', self._GENESIS), last_sample_timestamp=r.get('last_sample_timestamp', self._APOCALYPSE), source=r['source'], metadata=pymongo_utils.unquote_keys(r['metadata'])) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. 
:param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} query = {} if user is not None: query['user_id'] = user if project is not None: query['project_id'] = project if source is not None: query['source'] = source if start_timestamp or end_timestamp: return self._get_time_constrained_resources(query, start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, metaquery, resource, limit) else: return self._get_floating_resources(query, metaquery, resource, limit) @staticmethod def _make_period_dict(period, first_ts): """Create a period field for the _id of grouped fields. :param period: Period duration in seconds :param first_ts: First timestamp for the first period :return: dict defining the period_start expression for the $group _id """ if period >= 0: period_unique_dict = { "period_start": { "$divide": [ {"$subtract": [ {"$subtract": ["$timestamp", first_ts]}, {"$mod": [{"$subtract": ["$timestamp", first_ts]}, period * 1000] } ]}, period * 1000 ] } } else: # Note(ityaptin) Hack for older MongoDB versions (2.4.+ and older). # Since 2.6+ we could use the $literal operator period_unique_dict = {"$period_start": {"$add": [0, 0]}} return period_unique_dict def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instances. Items contain meter statistics as described by the query parameters. The filter must have a meter value set. """ if (groupby and set(groupby) - set(['user_id', 'project_id', 'resource_id', 'source', 'resource_metadata.instance_type'])): raise ceilometer.NotImplementedError( "Unable to group by these fields") q = pymongo_utils.make_query_from_filter(sample_filter) group_stage = {} project_stage = { "unit": "$_id.unit", "name": "$_id.name", "first_timestamp": "$first_timestamp", "last_timestamp": "$last_timestamp", "period_start": "$_id.period_start", } # Add timestamps to the $group stage group_stage.update({"first_timestamp": {"$min": "$timestamp"}, "last_timestamp": {"$max": "$timestamp"}}) # Define an _id field for grouped documents unique_group_field = {"name": "$counter_name", "unit": "$counter_unit"} # Define a first timestamp for periods if sample_filter.start_timestamp: first_timestamp = sample_filter.start_timestamp else: first_timestamp_cursor = self.db.meter.find( limit=1, sort=[('timestamp', pymongo.ASCENDING)]) if first_timestamp_cursor.count(): first_timestamp = first_timestamp_cursor[0]['timestamp'] else: first_timestamp = utils.EPOCH_TIME # Add a period_start field to the unique identifier of grouped # documents if period: period_dict = self._make_period_dict(period, first_timestamp) unique_group_field.update(period_dict) # Add the groupby fields to the unique identifier of grouped documents if groupby: unique_group_field.update(dict((field.replace(".", "/"), "$%s" % field) for field in groupby)) group_stage.update({"_id": unique_group_field}) self._compile_aggregate_stages(aggregate, group_stage, project_stage) # Aggregation stage list. The stages run one by one, each using the # documents produced by the previous stage. 
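# Illustrative pipeline (assumed sample values) for a query on
# counter_name='cpu_util' with no period and no groupby:
# [{'$match': {'counter_name': 'cpu_util'}},
#  {'$sort': {'timestamp': 1}},
#  {'$group': {'_id': {'name': '$counter_name', 'unit': '$counter_unit'},
#              'first_timestamp': {'$min': '$timestamp'},
#              'last_timestamp': {'$max': '$timestamp'}, ...}},
#  {'$sort': {'_id.period_start': 1}},
#  {'$project': {'unit': '$_id.unit', 'name': '$_id.name', ...}}]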
aggregation_query = [{'$match': q}, {"$sort": {"timestamp": 1}}, {"$group": group_stage}, {"$sort": {"_id.period_start": 1}}, {"$project": project_stage}] # results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0 results = self.db.meter.aggregate(aggregation_query, **self._make_aggregation_params()) return [self._stats_result_to_model(point, groupby, aggregate, period, first_timestamp) for point in self._get_results(results)] def _stats_result_aggregates(self, result, aggregate): stats_args = {} for attr, func in Connection.STANDARD_AGGREGATES.items(): if attr in result: stats_args.update(func.finalize(result, version_array=self.version)) if aggregate: stats_args['aggregate'] = {} for agr in aggregate: stats_args['aggregate'].update( Connection.AGGREGATES[agr.func].finalize( result, agr.param, self.version)) return stats_args def _stats_result_to_model(self, result, groupby, aggregate, period, first_timestamp): if period is None: period = 0 first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp) stats_args = self._stats_result_aggregates(result, aggregate) stats_args['unit'] = result['unit'] stats_args['duration'] = (result["last_timestamp"] - result["first_timestamp"]).total_seconds() stats_args['duration_start'] = result['first_timestamp'] stats_args['duration_end'] = result['last_timestamp'] stats_args['period'] = period start = result.get("period_start", 0) * period stats_args['period_start'] = (first_timestamp + datetime.timedelta(seconds=start)) stats_args['period_end'] = (first_timestamp + datetime.timedelta(seconds=start + period) if period else result['last_timestamp']) stats_args['groupby'] = ( dict((g, result['_id'].get(g.replace(".", "/"))) for g in groupby) if groupby else None) return models.Statistics(**stats_args) def _compile_aggregate_stages(self, aggregate, group_stage, project_stage): if not aggregate: for aggregation in Connection.STANDARD_AGGREGATES.values(): group_stage.update( aggregation.group(version_array=self.version) ) project_stage.update( aggregation.project( version_array=self.version ) ) else: for description in aggregate: aggregation = Connection.AGGREGATES.get(description.func) if aggregation: if not aggregation.validate(description.param): raise storage.StorageBadAggregate( 'Bad aggregate: %s.%s' % (description.func, description.param)) group_stage.update( aggregation.group(description.param, version_array=self.version) ) project_stage.update( aggregation.project(description.param, version_array=self.version) ) @staticmethod def _get_results(results): if isinstance(results, dict): return results.get('result', []) else: return results def _make_aggregation_params(self): if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION: return {"allowDiskUse": True} return {} ceilometer-6.1.5/ceilometer/storage/impl_log.py0000664000567000056710000001144413072744706023002 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple logging storage backend. 
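This backend stores nothing; it only logs the samples it receives, which
is useful for exercising the collector pipeline without provisioning a
real database (presumably selected with a 'log://' connection URL).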
""" from oslo_log import log from ceilometer.i18n import _LI from ceilometer.storage import base LOG = log.getLogger(__name__) class Connection(base.Connection): """Log the data.""" def upgrade(self): pass def clear(self): pass def record_metering_data(self, data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter. """ LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: ' '%(counter_volume)s') % ({'counter_name': data['counter_name'], 'resource_id': data['resource_id'], 'counter_volume': data['counter_volume']})) def clear_expired_metering_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ LOG.info(_LI("Dropping metering data with TTL %d"), ttl) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of dictionaries containing resource information. { 'resource_id': UUID of the resource, 'project_id': UUID of project owning the resource, 'user_id': UUID of user owning the resource, 'timestamp': UTC datetime of last update to the resource, 'metadata': most current metadata for the resource, 'meter': list of the meters reporting data for the resource, } :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ return [] def get_meters(self, user=None, project=None, resource=None, source=None, limit=None, metaquery=None, unique=False): """Return an iterable of dictionaries containing meter information. { 'name': name of the meter, 'type': type of the meter (gauge, delta, cumulative), 'resource_id': UUID of the resource, 'project_id': UUID of project owning the resource, 'user_id': UUID of user owning the resource, } :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param limit: Maximum number of results to return. :param metaquery: Optional dict with metadata to match on. :param unique: If set to true, return only unique meter information. """ return [] def get_samples(self, sample_filter, limit=None): """Return an iterable of samples. Items are created by :func:`ceilometer.meter.meter_message_from_counter`. """ return [] def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return a dictionary containing meter statistics. Meter statistics is described by the query parameters. The filter must have a meter value set. 
{ 'min': 'max': 'avg': 'sum': 'count': 'period': 'period_start': 'period_end': 'duration': 'duration_start': 'duration_end': } """ return [] ceilometer-6.1.5/ceilometer/storage/sqlalchemy/0000775000567000056710000000000013072745164022763 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/0000775000567000056710000000000013072745164025440 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/manage.py0000664000567000056710000000016413072744703027241 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python from migrate.versioning.shell import main if __name__ == '__main__': main(debug='False') ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/README0000664000567000056710000000021713072744703026316 0ustar jenkinsjenkins00000000000000sqlalchemy-migrate is DEPRECATED. All new migrations should be written using alembic. Please see ceilometer/storage/sqlalchemy/alembic/README ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056710000000231613072744703027551 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=ceilometer # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] # When creating new change scripts, Migrate will stamp the new script with # a version number. By default this is latest_version + 1. You can set this # to 'true' to tell Migrate to use the UTC timestamp instead. use_timestamp_numbering=False ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/0000775000567000056710000000000013072745164027310 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision0000664000567000056710000000406213072744703035370 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
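# Summary (added for clarity): on MySQL this migration converts the
# alarm timestamp columns listed in 'to_convert' below from DATETIME to
# the custom PreciseTimestamp type by creating a temporary column,
# copying values across page by page, dropping the original column and
# renaming the temporary one back to the original name.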
import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id'): temp_col_n = 'convert_data_type_temp_col' # Override the column we're going to convert with from_t, since the type # we're replacing could be custom and we need to tell SQLAlchemy how to # perform CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) to_convert = [ ('alarm', 'timestamp', 'id'), ('alarm', 'state_timestamp', 'id'), ('alarm_history', 'timestamp', 'alarm_id'), ] def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) for table_name, col_name, pk_attr in to_convert: table = sa.Table(table_name, meta, autoload=True) _convert_data_type(table, col_name, sa.DateTime(), models.PreciseTimestamp(), pk_attr=pk_attr) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py0000664000567000056710000000343513072744703033326 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
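# Summary (added for clarity): this migration creates the initial
# 'alarm' table, including identity columns (user_id, project_id), the
# threshold rule columns (counter_name, comparison_operator, threshold,
# statistic, evaluation_periods, period) and the action lists stored as
# Text columns.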
from sqlalchemy import MetaData, Table, Column, Text from sqlalchemy import Boolean, Integer, String, DateTime, Float def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alarm = Table( 'alarm', meta, Column('id', String(255), primary_key=True, index=True), Column('enabled', Boolean), Column('name', Text()), Column('description', Text()), Column('timestamp', DateTime(timezone=False)), Column('counter_name', String(255), index=True), Column('user_id', String(255), index=True), Column('project_id', String(255), index=True), Column('comparison_operator', String(2)), Column('threshold', Float), Column('statistic', String(255)), Column('evaluation_periods', Integer), Column('period', Integer), Column('state', String(255)), Column('state_timestamp', DateTime(timezone=False)), Column('ok_actions', Text()), Column('alarm_actions', Text()), Column('insufficient_data_actions', Text()), Column('matching_metadata', Text()), mysql_engine='InnoDB', mysql_charset='utf8') alarm.create() ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_t0000664000567000056710000000533513072744703035436 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration TABLES = ['sample', 'resource', 'source', 'sourceassoc'] DROP_TABLES = ['resource', 'source', 'sourceassoc'] INDEXES = { "sample": (('resource_id', 'resource', 'id'),), "sourceassoc": (('sample_id', 'sample', 'id'), ('resource_id', 'resource', 'id'), ('source_id', 'source', 'id')) } def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) load_tables = dict((table_name, sa.Table(table_name, meta, autoload=True)) for table_name in TABLES) # drop foreign keys if migrate_engine.name != 'sqlite': for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} fk_table_name = table_name if migrate_engine.name == "mysql": params['name'] = "_".join(('fk', fk_table_name, column)) elif (migrate_engine.name == "postgresql" and table_name == 'sample'): # fk was not renamed in script 030 params['name'] = "_".join(('meter', column, 'fkey')) fkey = ForeignKeyConstraint(**params) fkey.drop() # create source field in sample sample = load_tables['sample'] sample.create_column(sa.Column('source_id', sa.String(255))) # move source values to samples sourceassoc = load_tables['sourceassoc'] query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]). where(sourceassoc.c.sample_id.isnot(None))) for sample_id, source_id in migration.paged(query): (sample.update().where(sample_id == sample.c.id). 
values({'source_id': source_id}).execute()) # drop tables for table_name in DROP_TABLES: sa.Table(table_name, meta, autoload=True).drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py0000664000567000056710000000647013072744703035340 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint, UniqueConstraint import sqlalchemy as sa TABLES_DROP = ['user', 'project'] TABLES = ['user', 'project', 'sourceassoc', 'sample', 'resource', 'alarm_history'] INDEXES = { "sample": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "sourceassoc": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "resource": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "alarm_history": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('on_behalf_of', 'project', 'id')), } def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) load_tables = dict((table_name, sa.Table(table_name, meta, autoload=True)) for table_name in TABLES) if migrate_engine.name != 'sqlite': for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} if (migrate_engine.name == "mysql" and table_name != 'alarm_history'): params['name'] = "_".join(('fk', table_name, column)) elif (migrate_engine.name == "postgresql" and table_name == "sample"): # The fk contains the old table name params['name'] = "_".join(('meter', column, 'fkey')) fkey = ForeignKeyConstraint(**params) fkey.drop() sourceassoc = load_tables['sourceassoc'] if migrate_engine.name != 'sqlite': idx = sa.Index('idx_su', sourceassoc.c.source_id, sourceassoc.c.user_id) idx.drop(bind=migrate_engine) idx = sa.Index('idx_sp', sourceassoc.c.source_id, sourceassoc.c.project_id) idx.drop(bind=migrate_engine) params = {} if migrate_engine.name == "mysql": params = {'name': 'uniq_sourceassoc0sample_id'} uc = UniqueConstraint('sample_id', table=sourceassoc, **params) uc.create() params = {} if migrate_engine.name == "mysql": params = {'name': 'uniq_sourceassoc0sample_id0user_id'} uc = UniqueConstraint('sample_id', 'user_id', table=sourceassoc, **params) uc.drop() sourceassoc.c.user_id.drop() sourceassoc.c.project_id.drop() for table_name in TABLES_DROP: sa.Table(table_name, meta, autoload=True).drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py0000664000567000056710000000162113072744703033142 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import VARCHAR def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) name = Table('unique_name', meta, autoload=True) name.c.key.alter(type=VARCHAR(length=255)) trait = Table('trait', meta, autoload=True) trait.c.t_string.alter(type=VARCHAR(length=255)) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py0000664000567000056710000000151613072744703034060 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) meter = sa.Table('meter', meta, autoload=True) index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, meter.c.counter_name) index.create(bind=migrate_engine) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints0000664000567000056710000000304113072744703035377 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table TABLES = ['user', 'project', 'alarm'] INDEXES = { "alarm": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), } def upgrade(migrate_engine): if migrate_engine.name == 'sqlite': return meta = MetaData(bind=migrate_engine) load_tables = dict((table_name, Table(table_name, meta, autoload=True)) for table_name in TABLES) for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', table_name, column)) fkey = ForeignKeyConstraint(**params) fkey.drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py0000664000567000056710000000606213072744703033427 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import UniqueConstraint from ceilometer.storage.sqlalchemy import migration def upgrade(migrate_engine): meta = MetaData(migrate_engine) trait_type = Table( 'trait_type', meta, Column('id', Integer, primary_key=True), Column('desc', String(255)), Column('data_type', Integer), UniqueConstraint('desc', 'data_type', name="tt_unique"), mysql_engine='InnoDB', mysql_charset='utf8', ) trait = Table('trait', meta, autoload=True) unique_name = Table('unique_name', meta, autoload=True) trait_type.create(migrate_engine) # Trait type extracts data from Trait and Unique name. # We take all trait names from Unique Name, and data types # from Trait. We then remove dtype and name from trait, and # remove the name field. conn = migrate_engine.connect() sql = ("INSERT INTO trait_type " "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait " "INNER JOIN unique_name " "ON trait.name_id = unique_name.id " "GROUP BY unique_name.id, unique_name.key, trait.t_type") conn.execute(sql) conn.close() # Now we need to drop the foreign key constraint, rename # the trait.name column, and re-add a new foreign # key constraint params = {'columns': [trait.c.name_id], 'refcolumns': [unique_name.c.id]} if migrate_engine.name == 'mysql': params['name'] = "trait_ibfk_1" # foreign key to the unique name table fkey = ForeignKeyConstraint(**params) fkey.drop() Column('trait_type_id', Integer).create(trait) # Move data from name_id column into trait_type_id column query = select([trait.c.id, trait.c.name_id]) for key, value in migration.paged(query): (trait.update().where(trait.c.id == key). 
values({"trait_type_id": value}).execute()) trait.c.name_id.drop() params = {'columns': [trait.c.trait_type_id], 'refcolumns': [trait_type.c.id]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', 'trait_type', 'id')) fkey = ForeignKeyConstraint(**params) fkey.create() # Drop the t_type column to data_type. trait.c.t_type.drop() # Finally, drop the unique_name table - we don't need it # anymore. unique_name.drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py0000664000567000056710000000444613072744703035277 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table from sqlalchemy.sql.expression import select TABLES = ['resource', 'sourceassoc', 'user', 'project', 'meter', 'source', 'alarm'] INDEXES = { "resource": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "sourceassoc": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('resource_id', 'resource', 'id'), ('meter_id', 'meter', 'id'), ('source_id', 'source', 'id')), "alarm": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "meter": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('resource_id', 'resource', 'id'),) } def upgrade(migrate_engine): if migrate_engine.name == 'sqlite': return meta = MetaData(bind=migrate_engine) load_tables = dict((table_name, Table(table_name, meta, autoload=True)) for table_name in TABLES) for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] subq = select([getattr(ref_table.c, ref_column_name)]) sql_del = table.delete().where( ~ getattr(table.c, column).in_(subq)) migrate_engine.execute(sql_del) params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', table_name, column)) fkey = ForeignKeyConstraint(**params) fkey.create() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py0000664000567000056710000000216513072744703033520 0ustar jenkinsjenkins00000000000000# Copyright 2012 Canonical. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def upgrade(migrate_engine): if migrate_engine.name == "mysql": tables = ['meter', 'user', 'resource', 'project', 'source', 'sourceassoc'] migrate_engine.execute("SET foreign_key_checks = 0") for table in tables: migrate_engine.execute( "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) migrate_engine.execute("SET foreign_key_checks = 1") migrate_engine.execute( "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % migrate_engine.url.database) ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_dateti0000664000567000056710000000367413072744703035412 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models _col = 'timestamp' def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) meter = sa.Table('meter', meta, autoload=True) _convert_data_type(meter, _col, sa.DateTime(), models.PreciseTimestamp(), pk_attr='id', index=True) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py0000664000567000056710000000562713072744703033431 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
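# NOTE (editorial): script 017 above rewrites MySQL timestamp columns as
# models.PreciseTimestamp because MySQL DATETIME columns (before 5.6.4)
# silently discard sub-second precision.  In outline -- the real class in
# ceilometer/storage/sqlalchemy/models.py also converts values on bind and
# fetch -- it is a TypeDecorator that swaps in a DECIMAL on MySQL:
#
#     from sqlalchemy import DateTime, types
#     from sqlalchemy.types import TypeDecorator
#
#     class PreciseTimestamp(TypeDecorator):
#         impl = DateTime
#
#         def load_dialect_impl(self, dialect):
#             if dialect.name == 'mysql':
#                 # microseconds survive as DECIMAL(20, 6)
#                 return dialect.type_descriptor(
#                     types.DECIMAL(precision=20, scale=6, asdecimal=True))
#             return self.impl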
from migrate import ForeignKeyConstraint from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table from ceilometer.storage.sqlalchemy import migration def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) event_type = Table( 'event_type', meta, Column('id', Integer, primary_key=True), Column('desc', String(255), unique=True), mysql_engine='InnoDB', mysql_charset='utf8', ) event_type.create() event = Table('event', meta, autoload=True) unique_name = Table('unique_name', meta, autoload=True) # Event type is a specialization of Unique name, so # we insert into the event_type table all the distinct # unique names from the event.unique_name field along # with the key from the unique_name table, and # then rename the event.unique_name field to event.event_type conn = migrate_engine.connect() sql = ("INSERT INTO event_type " "SELECT unique_name.id, unique_name.key FROM event " "INNER JOIN unique_name " "ON event.unique_name_id = unique_name.id " "GROUP BY unique_name.id") conn.execute(sql) conn.close() # Now we need to drop the foreign key constraint, rename # the event.unique_name column, and re-add a new foreign # key constraint params = {'columns': [event.c.unique_name_id], 'refcolumns': [unique_name.c.id]} if migrate_engine.name == 'mysql': params['name'] = "event_ibfk_1" fkey = ForeignKeyConstraint(**params) fkey.drop() Column('event_type_id', Integer).create(event) # Move data from unique_name_id column into event_type_id column # and delete the entry from the unique_name table query = select([event.c.id, event.c.unique_name_id]) for key, value in migration.paged(query): (event.update().where(event.c.id == key). values({"event_type_id": value}).execute()) unique_name.delete().where(unique_name.c.id == key).execute() params = {'columns': [event.c.event_type_id], 'refcolumns': [event_type.c.id]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', 'event_type', 'id')) fkey = ForeignKeyConstraint(**params) fkey.create() event.c.unique_name_id.drop() ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgs0000664000567000056710000000426513072744703035424 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE (gordc): this is a copy of 024 migration script which missed pgsql import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. 
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'postgresql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_ala0000664000567000056710000000146613072744703035344 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine alarm = Table('alarm', meta, autoload=True) alarm.c.counter_name.alter(name='meter_name') ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py0000664000567000056710000000244713072744703033416 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
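# NOTE (editorial): the indexes dropped below were created by declaring
# primary-key columns with index=True in script 001, e.g.
# Column('id', String(255), primary_key=True, index=True).  Every backend
# already maintains an index for the primary key, so the explicit ix_*_id
# duplicates only cost write overhead and disk space.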
from sqlalchemy import Index, MetaData, Table INDEXES = { # `table_name`: ((`index_name`, `column`),) "user": (('ix_user_id', 'id'),), "source": (('ix_source_id', 'id'),), "project": (('ix_project_id', 'id'),), "meter": (('ix_meter_id', 'id'),), "alarm": (('ix_alarm_id', 'id'),), "resource": (('ix_resource_id', 'id'),) } def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) load_tables = dict((table_name, Table(table_name, meta, autoload=True)) for table_name in INDEXES.keys()) for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for index_name, column in indexes: index = Index(index_name, table.c[column]) index.drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py0000664000567000056710000000635513072744703033344 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import UniqueConstraint def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table( 'meter', meta, Column('id', Integer, primary_key=True, index=True), Column('counter_name', String(255)), Column('user_id', String(255), index=True), Column('project_id', String(255), index=True), Column('resource_id', String(255)), Column('resource_metadata', String(5000)), Column('counter_type', String(255)), Column('counter_volume', Integer), Column('counter_duration', Integer), Column('timestamp', DateTime(timezone=False), index=True), Column('message_signature', String(1000)), Column('message_id', String(1000)), mysql_engine='InnoDB', mysql_charset='utf8', ) resource = Table( 'resource', meta, Column('id', String(255), primary_key=True, index=True), Column('resource_metadata', String(5000)), Column('project_id', String(255), index=True), Column('received_timestamp', DateTime(timezone=False)), Column('timestamp', DateTime(timezone=False), index=True), Column('user_id', String(255), index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) user = Table( 'user', meta, Column('id', String(255), primary_key=True, index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) project = Table( 'project', meta, Column('id', String(255), primary_key=True, index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) sourceassoc = Table( 'sourceassoc', meta, Column('source_id', String(255), index=True), Column('user_id', String(255)), Column('project_id', String(255)), Column('resource_id', String(255)), Column('meter_id', Integer), Index('idx_su', 'source_id', 'user_id'), Index('idx_sp', 'source_id', 'project_id'), Index('idx_sr', 'source_id', 'resource_id'), Index('idx_sm', 'source_id', 'meter_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) source = Table( 'source', meta, Column('id', String(255), primary_key=True, index=True), UniqueConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', ) tables = 
[meter, project, resource, user, source, sourceassoc] for i in sorted(tables, key=lambda table: table.fullname): i.create() ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.p0000664000567000056710000000166513072744703035352 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Column, DateTime def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) timestamp = Column('timestamp', DateTime) resource.drop_column(timestamp) received_timestamp = Column('received_timestamp', DateTime) resource.drop_column(received_timestamp) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py0000664000567000056710000000154013072744703033571 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) unit = Column('counter_unit', String(255)) meter.create_column(unit) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py0000664000567000056710000001015613072744703034057 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
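# NOTE (editorial): this script renames the meter table to sample.  On MySQL
# an index that backs a FOREIGN KEY cannot be dropped while the constraint
# exists, which is why each index swap below is bracketed by dropping and
# re-creating the constraint, schematically (using the script's own names):
#
#     migrate.ForeignKeyConstraint(columns=[meter.c.user_id],
#                                  refcolumns=[user.c.id],
#                                  name='fk_meter_user_id').drop()
#     # ...drop ix_meter_user_id, create ix_sample_user_id...
#     migrate.ForeignKeyConstraint(columns=[meter.c.user_id],
#                                  refcolumns=[user.c.id],
#                                  name='fk_sample_user_id').create()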
import migrate import sqlalchemy as sa def _handle_meter_indices(meta): if meta.bind.engine.name == 'sqlite': return resource = sa.Table('resource', meta, autoload=True) project = sa.Table('project', meta, autoload=True) user = sa.Table('user', meta, autoload=True) meter = sa.Table('meter', meta, autoload=True) indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp), sa.Index('ix_sample_timestamp', meter.c.timestamp)), (sa.Index('ix_meter_user_id', meter.c.user_id), sa.Index('ix_sample_user_id', meter.c.user_id)), (sa.Index('ix_meter_project_id', meter.c.project_id), sa.Index('ix_sample_project_id', meter.c.project_id)), (sa.Index('idx_meter_rid_cname', meter.c.resource_id, meter.c.counter_name), sa.Index('idx_sample_rid_cname', meter.c.resource_id, meter.c.counter_name))] fk_params = [({'columns': [meter.c.resource_id], 'refcolumns': [resource.c.id]}, 'fk_meter_resource_id', 'fk_sample_resource_id'), ({'columns': [meter.c.project_id], 'refcolumns': [project.c.id]}, 'fk_meter_project_id', 'fk_sample_project_id'), ({'columns': [meter.c.user_id], 'refcolumns': [user.c.id]}, 'fk_meter_user_id', 'fk_sample_user_id')] for fk in fk_params: params = fk[0] if meta.bind.engine.name == 'mysql': params['name'] = fk[1] migrate.ForeignKeyConstraint(**params).drop() for meter_ix, sample_ix in indices: meter_ix.drop() sample_ix.create() for fk in fk_params: params = fk[0] if meta.bind.engine.name == 'mysql': params['name'] = fk[2] migrate.ForeignKeyConstraint(**params).create() def _alter_sourceassoc(meta, t_name, ix_name, post_action=False): if meta.bind.engine.name == 'sqlite': return sourceassoc = sa.Table('sourceassoc', meta, autoload=True) table = sa.Table(t_name, meta, autoload=True) user = sa.Table('user', meta, autoload=True) c_name = '%s_id' % t_name col = getattr(sourceassoc.c, c_name) uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name uniq_cols = (c_name, 'user_id') param = {'columns': [col], 'refcolumns': [table.c.id]} user_param = {'columns': [sourceassoc.c.user_id], 'refcolumns': [user.c.id]} if meta.bind.engine.name == 'mysql': param['name'] = 'fk_sourceassoc_%s' % c_name user_param['name'] = 'fk_sourceassoc_user_id' actions = [migrate.ForeignKeyConstraint(**user_param), migrate.ForeignKeyConstraint(**param), sa.Index(ix_name, sourceassoc.c.source_id, col), migrate.UniqueConstraint(*uniq_cols, table=sourceassoc, name=uniq_name)] for action in actions: action.create() if post_action else action.drop() def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) _handle_meter_indices(meta) meter = sa.Table('meter', meta, autoload=True) meter.rename('sample') _alter_sourceassoc(meta, 'meter', 'idx_sm') sourceassoc = sa.Table('sourceassoc', meta, autoload=True) sourceassoc.c.meter_id.alter(name='sample_id') # re-bind metadata to pick up alter name change meta = sa.MetaData(bind=migrate_engine) _alter_sourceassoc(meta, 'sample', 'idx_ss', True) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_type0000664000567000056710000000313613072744703035405 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) resource.c.user_id.alter(type=String(255)) resource.c.project_id.alter(type=String(255)) resource.c.resource_id.alter(type=String(255)) resource.c.source_id.alter(type=String(255)) sample = Table('sample', meta, autoload=True) sample.c.message_signature.alter(type=String(64)) sample.c.message_id.alter(type=String(128)) alarm = Table('alarm', meta, autoload=True) alarm.c.alarm_id.alter(type=String(128)) alarm.c.user_id.alter(type=String(255)) alarm.c.project_id.alter(type=String(255)) alarm_history = Table('alarm_history', meta, autoload=True) alarm_history.c.alarm_id.alter(type=String(128)) alarm_history.c.user_id.alter(type=String(255)) alarm_history.c.project_id.alter(type=String(255)) alarm_history.c.event_id.alter(type=String(128)) alarm_history.c.on_behalf_of.alter(type=String(255)) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py0000664000567000056710000000323713072744703034435 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
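# NOTE (editorial): the ForeignKeyHandle context manager below drops the
# sample -> meter foreign key in __enter__ and re-creates it in __exit__, so
# the constraint comes back even if the index maintenance inside the `with`
# block raises.  As in scripts 030 and 031, the foreign key has to be out of
# the way before its backing index can be touched on MySQL.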
from migrate import ForeignKeyConstraint import sqlalchemy as sa class ForeignKeyHandle(object): def __init__(self, meta): sample = sa.Table('sample', meta, autoload=True) meter = sa.Table('meter', meta, autoload=True) self.sample_params = {'columns': [sample.c.meter_id], 'refcolumns': [meter.c.id]} if meta.bind.engine.name == 'mysql': self.sample_params['name'] = "fk_sample_meter_id" def __enter__(self): ForeignKeyConstraint(**self.sample_params).drop() def __exit__(self, type, value, traceback): ForeignKeyConstraint(**self.sample_params).create() def upgrade(migrate_engine): if migrate_engine.name == 'sqlite': return meta = sa.MetaData(bind=migrate_engine) sample = sa.Table('sample', meta, autoload=True) with ForeignKeyHandle(meta): # remove stray indexes implicitly created by InnoDB for index in sample.indexes: if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']: index.drop() sa.Index('ix_sample_meter_id', sample.c.meter_id).create() ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.0000664000567000056710000000156513072744703035253 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alarm = Table('alarm', meta, autoload=True) time_constraints = Column('time_constraints', Text()) alarm.create_column(time_constraints) ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_i0000664000567000056710000000154013072744703035274 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy as sa # Add index on metadata_hash column of resource def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) resource = sa.Table('resource', meta, autoload=True) index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash) index.create(bind=migrate_engine) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py0000664000567000056710000000150513072744703034761 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def upgrade(migrate_engine): # NOTE(gordc): this is a noop script to handle bug1468916 # previous lowering of id length will fail if db contains data longer. # this skips migration for those failing. the next script will resize # if this original migration passed. pass ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py0000664000567000056710000000161213072744703034724 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import BigInteger from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('metadata_int', meta, autoload=True) resource.c.value.alter(type=BigInteger) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py0000664000567000056710000000162313072744703032410 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
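# NOTE (editorial): Float(53) below requests a 53-bit mantissa, which the
# common backends map to an 8-byte double (DOUBLE on MySQL, "double
# precision" on PostgreSQL), so metadata and trait floats no longer lose
# precision to a 4-byte single-precision column.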
from sqlalchemy import Float from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) metadata_float = Table('metadata_float', meta, autoload=True) metadata_float.c.value.alter(type=Float(53)) trait = Table('trait', meta, autoload=True) trait.c.t_float.alter(type=Float(53)) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py0000664000567000056710000000406413072744703032367 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import Float from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) unique_name = Table( 'unique_name', meta, Column('id', Integer, primary_key=True), Column('key', String(32), index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) unique_name.create() event = Table( 'event', meta, Column('id', Integer, primary_key=True), Column('generated', Float(asdecimal=True), index=True), Column('unique_name_id', Integer, ForeignKey('unique_name.id')), mysql_engine='InnoDB', mysql_charset='utf8', ) event.create() trait = Table( 'trait', meta, Column('id', Integer, primary_key=True), Column('name_id', Integer, ForeignKey('unique_name.id')), Column('t_type', Integer, index=True), Column('t_string', String(32), nullable=True, default=None, index=True), Column('t_float', Float, nullable=True, default=None, index=True), Column('t_int', Integer, nullable=True, default=None, index=True), Column('t_datetime', Float(asdecimal=True), nullable=True, default=None, index=True), Column('event_id', Integer, ForeignKey('event.id')), mysql_engine='InnoDB', mysql_charset='utf8', ) trait.create() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py0000664000567000056710000001166113072744703034106 0ustar jenkinsjenkins00000000000000# # Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
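# NOTE (editorial): this script folds a short-lived Alembic history back into
# sqlalchemy-migrate.  get_alembic_version() reads the alembic_version table
# if one exists; upgrade() then replays only the steps this database has not
# yet seen, walking the recorded revision chain
# 43b1a023dfaa -> 17738166b91 -> b6ae66d05e3 before dropping the Alembic
# bookkeeping table.  A database that never ran Alembic enters at the top of
# the chain and receives every step.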
import migrate import sqlalchemy as sa def get_alembic_version(meta): """Return Alembic version or None if no Alembic table exists.""" try: a_ver = sa.Table( 'alembic_version', meta, autoload=True) return sa.select([a_ver.c.version_num]).scalar() except sa.exc.NoSuchTableError: return None def delete_alembic(meta): try: sa.Table( 'alembic_version', meta, autoload=True).drop(checkfirst=True) except sa.exc.NoSuchTableError: pass INDEXES = ( # ([dialects], table_name, index_name, create/delete, uniq/not_uniq) (['mysql', 'sqlite', 'postgresql'], 'resource', 'resource_user_id_project_id_key', ('user_id', 'project_id'), True, False, True), (['mysql'], 'source', 'id', ('id',), False, True, False)) def index_cleanup(meta, table_name, uniq_name, columns, create, unique, limited): table = sa.Table(table_name, meta, autoload=True) if create: if limited and meta.bind.engine.name == 'mysql': # For some versions of mysql we can get an error # "Specified key was too long; max key length is 1000 bytes". # We should create an index by hand in this case with limited # length of columns. columns_mysql = ",".join((c + "(100)" for c in columns)) sql = ("create index %s ON %s (%s)" % (uniq_name, table, columns_mysql)) meta.bind.engine.execute(sql) else: cols = [table.c[col] for col in columns] sa.Index(uniq_name, *cols, unique=unique).create() else: if unique: migrate.UniqueConstraint(*columns, table=table, name=uniq_name).drop() else: cols = [table.c[col] for col in columns] sa.Index(uniq_name, *cols).drop() def change_uniq(meta): uniq_name = 'uniq_sourceassoc0meter_id0user_id' columns = ('meter_id', 'user_id') if meta.bind.engine.name == 'sqlite': return sourceassoc = sa.Table('sourceassoc', meta, autoload=True) meter = sa.Table('meter', meta, autoload=True) user = sa.Table('user', meta, autoload=True) if meta.bind.engine.name == 'mysql': # For mysql dialect all dependent FK should be removed # before renaming of constraint. 
params = {'columns': [sourceassoc.c.meter_id], 'refcolumns': [meter.c.id], 'name': 'fk_sourceassoc_meter_id'} migrate.ForeignKeyConstraint(**params).drop() params = {'columns': [sourceassoc.c.user_id], 'refcolumns': [user.c.id], 'name': 'fk_sourceassoc_user_id'} migrate.ForeignKeyConstraint(**params).drop() migrate.UniqueConstraint(*columns, table=sourceassoc, name=uniq_name).create() if meta.bind.engine.name == 'mysql': params = {'columns': [sourceassoc.c.meter_id], 'refcolumns': [meter.c.id], 'name': 'fk_sourceassoc_meter_id'} migrate.ForeignKeyConstraint(**params).create() params = {'columns': [sourceassoc.c.user_id], 'refcolumns': [user.c.id], 'name': 'fk_sourceassoc_user_id'} migrate.ForeignKeyConstraint(**params).create() def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) a_ver = get_alembic_version(meta) if not a_ver: alarm = sa.Table('alarm', meta, autoload=True) repeat_act = sa.Column('repeat_actions', sa.Boolean, server_default=sa.sql.expression.false()) alarm.create_column(repeat_act) a_ver = '43b1a023dfaa' if a_ver == '43b1a023dfaa': meter = sa.Table('meter', meta, autoload=True) meter.c.resource_metadata.alter(type=sa.Text) a_ver = '17738166b91' if a_ver == '17738166b91': for (engine_names, table_name, uniq_name, columns, create, uniq, limited) in INDEXES: if migrate_engine.name in engine_names: index_cleanup(meta, table_name, uniq_name, columns, create, uniq, limited) a_ver = 'b6ae66d05e3' if a_ver == 'b6ae66d05e3': change_uniq(meta) delete_alembic(meta) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py0000664000567000056710000000476513072744703034177 0ustar jenkinsjenkins00000000000000# # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
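# NOTE (editorial): the script below explodes each sample's JSON
# resource_metadata into per-type key/value tables (metadata_text,
# metadata_bool, metadata_int, metadata_float) so metadata can be filtered in
# SQL.  utils.dict_to_keyval() flattens nested structures into dotted paths;
# illustratively, under the flattening rules in ceilometer/utils.py:
#
#     >>> dict(utils.dict_to_keyval({'disk': {'size': 10}, 'ephemeral': True}))
#     {'disk.size': 10, 'ephemeral': True}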
import json import six from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import Float from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy.sql import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text from ceilometer import utils tables = [('metadata_text', Text, True), ('metadata_bool', Boolean, False), ('metadata_int', Integer, False), ('metadata_float', Float, False)] def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) meta_tables = {} for t_name, t_type, t_nullable in tables: meta_tables[t_name] = Table( t_name, meta, Column('id', Integer, ForeignKey('meter.id'), primary_key=True), Column('meta_key', String(255), index=True, primary_key=True), Column('value', t_type, nullable=t_nullable), mysql_engine='InnoDB', mysql_charset='utf8', ) meta_tables[t_name].create() for row in select([meter]).execute(): if row['resource_metadata']: meter_id = row['id'] rmeta = json.loads(row['resource_metadata']) for key, v in utils.dict_to_keyval(rmeta): ins = None if isinstance(v, six.string_types) or v is None: ins = meta_tables['metadata_text'].insert() elif isinstance(v, bool): ins = meta_tables['metadata_bool'].insert() elif isinstance(v, six.integer_types): ins = meta_tables['metadata_int'].insert() elif isinstance(v, float): ins = meta_tables['metadata_float'].insert() if ins is not None: ins.values(id=meter_id, meta_key=key, value=v).execute() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py0000664000567000056710000000407713072744703033105 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
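# NOTE (editorial): this script collapses the alarm table's per-column
# threshold fields into a single JSON `rule` document (serialized with
# json.dumps below) and adds a `type` column defaulting to 'threshold'.
# For a pre-existing row the generated rule looks like this -- all values are
# illustrative, not taken from any real deployment:
#
#     {"meter_name": "cpu_util", "comparison_operator": "gt",
#      "threshold": 70.0, "statistic": "avg", "evaluation_periods": 3,
#      "period": 600,
#      "query": [{"field": "resource_id", "op": "eq", "value": "inst-0001"}]}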
import json from sqlalchemy import MetaData, Table, Column, Index from sqlalchemy import String, Text def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine table = Table('alarm', meta, autoload=True) type = Column('type', String(50), default='threshold') type.create(table, populate_default=True) rule = Column('rule', Text()) rule.create(table) for row in table.select().execute().fetchall(): query = [] if row.matching_metadata is not None: matching_metadata = json.loads(row.matching_metadata) for key in matching_metadata: query.append({'field': key, 'op': 'eq', 'value': matching_metadata[key]}) rule = { 'meter_name': row.meter_name, 'comparison_operator': row.comparison_operator, 'threshold': row.threshold, 'statistic': row.statistic, 'evaluation_periods': row.evaluation_periods, 'period': row.period, 'query': query } table.update().where(table.c.id == row.id).values(rule=json.dumps(rule)).execute() index = Index('ix_alarm_counter_name', table.c.meter_name) index.drop(bind=migrate_engine) table.c.meter_name.drop() table.c.comparison_operator.drop() table.c.threshold.drop() table.c.statistic.drop() table.c.evaluation_periods.drop() table.c.period.drop() table.c.matching_metadata.drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql0000664000567000056710000000142213072744703033422 0ustar jenkinsjenkins00000000000000ALTER TABLE trait RENAME TO trait_orig; CREATE TABLE trait_type ( id INTEGER PRIMARY KEY ASC, 'desc' STRING NOT NULL, data_type INTEGER NOT NULL, UNIQUE ('desc', data_type) ); INSERT INTO trait_type SELECT un.id, un.key, t.t_type FROM unique_name un JOIN trait_orig t ON un.id = t.name_id GROUP BY un.id; CREATE TABLE trait ( id INTEGER PRIMARY KEY ASC, t_string VARCHAR(255), t_int INTEGER, t_float FLOAT, t_datetime FLOAT, trait_type_id INTEGER NOT NULL, event_id INTEGER NOT NULL, FOREIGN KEY (trait_type_id) REFERENCES trait_type (id), FOREIGN KEY (event_id) REFERENCES event (id) ); INSERT INTO trait SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime, t.name_id, t.event_id FROM trait_orig t; DROP TABLE trait_orig; DROP TABLE unique_name;ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py0000664000567000056710000000465713072744703035105 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table, Column, Index from sqlalchemy import String, DateTime def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine project = Table('project', meta, autoload=True) user = Table('user', meta, autoload=True) alarm_history = Table( 'alarm_history', meta, Column('event_id', String(255), primary_key=True, index=True), Column('alarm_id', String(255)), Column('on_behalf_of', String(255)), Column('project_id', String(255)), Column('user_id', String(255)), Column('type', String(20)), Column('detail', String(255)), Column('timestamp', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8') alarm_history.create() if migrate_engine.name in ['mysql', 'postgresql']: indices = [Index('ix_alarm_history_alarm_id', alarm_history.c.alarm_id), Index('ix_alarm_history_on_behalf_of', alarm_history.c.on_behalf_of), Index('ix_alarm_history_project_id', alarm_history.c.project_id), Index('ix_alarm_history_on_user_id', alarm_history.c.user_id)] for index in indices: index.create(migrate_engine) fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of], refcolumns=[project.c.id]), ForeignKeyConstraint(columns=[alarm_history.c.project_id], refcolumns=[project.c.id]), ForeignKeyConstraint(columns=[alarm_history.c.user_id], refcolumns=[user.c.id])] for fkey in fkeys: fkey.create(engine=migrate_engine) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py0000664000567000056710000000621113072744703034207 0ustar jenkinsjenkins00000000000000# # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy as sa def handle_rid_index(meta): if meta.bind.engine.name == 'sqlite': return resource = sa.Table('resource', meta, autoload=True) sample = sa.Table('sample', meta, autoload=True) params = {'columns': [sample.c.resource_id], 'refcolumns': [resource.c.id], 'name': 'fk_sample_resource_id'} if meta.bind.engine.name == 'mysql': # For mysql dialect all dependent FK should be removed # before index create/delete migrate.ForeignKeyConstraint(**params).drop() index = sa.Index('idx_sample_rid_cname', sample.c.resource_id, sample.c.counter_name) index.drop() if meta.bind.engine.name == 'mysql': migrate.ForeignKeyConstraint(**params).create() def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) meter = sa.Table( 'meter', meta, sa.Column('id', sa.Integer, primary_key=True), sa.Column('name', sa.String(255), nullable=False), sa.Column('type', sa.String(255)), sa.Column('unit', sa.String(255)), sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'), mysql_engine='InnoDB', mysql_charset='utf8' ) meter.create() sample = sa.Table('sample', meta, autoload=True) query = sa.select([sample.c.counter_name, sample.c.counter_type, sample.c.counter_unit]).distinct() for row in query.execute(): meter.insert().values(name=row['counter_name'], type=row['counter_type'], unit=row['counter_unit']).execute() meter_id = sa.Column('meter_id', sa.Integer) meter_id.create(sample) params = {'columns': [sample.c.meter_id], 'refcolumns': [meter.c.id]} if migrate_engine.name == 'mysql': params['name'] = 'fk_sample_meter_id' if migrate_engine.name != 'sqlite': migrate.ForeignKeyConstraint(**params).create() index = sa.Index('ix_meter_name', meter.c.name) index.create(bind=migrate_engine) for row in sa.select([meter]).execute(): (sample.update(). where(sa.and_(sample.c.counter_name == row['name'], sample.c.counter_type == row['type'], sample.c.counter_unit == row['unit'])). values({sample.c.meter_id: row['id']}).execute()) handle_rid_index(meta) sample.c.counter_name.drop() sample.c.counter_type.drop() sample.c.counter_unit.drop() sample.c.counter_volume.alter(name='volume') ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py0000664000567000056710000000165113072744703034071 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
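# Hedged sketch of what the 031 migration above effectively does, expressed
# as plain SQL (illustrative only; the actual migration iterates row-by-row
# through SQLAlchemy and dialect details vary):
#
#   INSERT INTO meter (name, type, unit)
#       SELECT DISTINCT counter_name, counter_type, counter_unit FROM sample;
#
#   UPDATE sample SET meter_id =
#       (SELECT m.id FROM meter m
#         WHERE m.name = sample.counter_name
#           AND m.type = sample.counter_type
#           AND m.unit = sample.counter_unit);
#
# after which the redundant counter_name/type/unit columns are dropped and
# counter_volume is renamed to volume.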
from oslo_utils import timeutils import sqlalchemy from ceilometer.storage.sqlalchemy import models def upgrade(migrate_engine): meta = sqlalchemy.MetaData(bind=migrate_engine) meter = sqlalchemy.Table('meter', meta, autoload=True) c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(), default=timeutils.utcnow) meter.create_column(c) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision0000664000567000056710000000423613072744703035417 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py0000664000567000056710000000137313072744703033227 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
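# Hedged illustration of the _convert_data_type() helper in the 024 migration
# above: converting a hypothetical FLOAT column 'created_at' on a
# hypothetical 'thing' table to PreciseTimestamp would look like the call
# below. The helper copies values page-by-page (via migration.paged) into a
# temporary column, drops the original, renames the temporary column back,
# and optionally indexes it.
#
#   meta = sa.MetaData(bind=migrate_engine)
#   thing = sa.Table('thing', meta, autoload=True)     # hypothetical table
#   _convert_data_type(thing, 'created_at', sa.Float(),
#                      models.PreciseTimestamp(), pk_attr='id', index=True)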
import sqlalchemy as sa def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) raw = sa.Column('raw', sa.Text) event.create_column(raw) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_0000664000567000056710000000160613072744703035361 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) resource.c.resource_metadata.alter(type=Text) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py0000664000567000056710000000137713072744703033354 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) users = Table('alarm', meta, autoload=True) users.c.id.alter(name='alarm_id') ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py0000664000567000056710000001257213072744703033604 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
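# Hedged sketch of the alter-in-place pattern used by migrations 018, 033
# and 042 above. The .alter()/.create_column() extensions come from
# sqlalchemy-migrate's monkey patches; engine URL and table contents are
# illustrative assumptions.
import migrate  # noqa: importing activates the changeset column extensions
import sqlalchemy as sa

engine = sa.create_engine('sqlite:///ceilometer.db')  # hypothetical URL
meta = sa.MetaData(bind=engine)
alarm = sa.Table('alarm', meta, autoload=True)        # assumes table exists
alarm.c.id.alter(name='alarm_id')                     # rename, as in 033
event = sa.Table('event', meta, autoload=True)
event.create_column(sa.Column('raw', sa.Text))        # add column, as in 042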
import hashlib import migrate from oslo_serialization import jsonutils import sqlalchemy as sa m_tables = [('metadata_text', sa.Text, True), ('metadata_bool', sa.Boolean, False), ('metadata_int', sa.BigInteger, False), ('metadata_float', sa.Float(53), False)] def _migrate_meta_tables(meta, col, new_col, new_fk): for t_name, t_type, t_nullable in m_tables: m_table = sa.Table(t_name, meta, autoload=True) m_table_new = sa.Table( '%s_new' % t_name, meta, sa.Column('id', sa.Integer, sa.ForeignKey(new_fk), primary_key=True), sa.Column('meta_key', sa.String(255), primary_key=True), sa.Column('value', t_type, nullable=t_nullable), mysql_engine='InnoDB', mysql_charset='utf8', ) m_table_new.create() if m_table.select().scalar() is not None: m_table_new.insert().from_select( ['id', 'meta_key', 'value'], sa.select([new_col, m_table.c.meta_key, m_table.c.value]).where( col == m_table.c.id).group_by( new_col, m_table.c.meta_key, m_table.c.value)).execute() m_table.drop() if meta.bind.engine.name != 'sqlite': sa.Index('ix_%s_meta_key' % t_name, m_table_new.c.meta_key).create() m_table_new.rename(t_name) def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) resource = sa.Table( 'resource', meta, sa.Column('internal_id', sa.Integer, primary_key=True), sa.Column('resource_id', sa.String(255)), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('source_id', sa.String(255)), sa.Column('resource_metadata', sa.Text), sa.Column('metadata_hash', sa.String(32)), mysql_engine='InnoDB', mysql_charset='utf8') resource.create() # copy resource data in to resource table sample = sa.Table('sample', meta, autoload=True) sa.Column('metadata_hash', sa.String(32)).create(sample) for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute(): sample.update().where(sample.c.id == row['id']).values( {sample.c.metadata_hash: hashlib.md5(jsonutils.dumps( row['resource_metadata'], sort_keys=True)).hexdigest()}).execute() query = sa.select([sample.c.resource_id, sample.c.user_id, sample.c.project_id, sample.c.source_id, sample.c.resource_metadata, sample.c.metadata_hash]).distinct() for row in query.execute(): resource.insert().values( resource_id=row['resource_id'], user_id=row['user_id'], project_id=row['project_id'], source_id=row['source_id'], resource_metadata=row['resource_metadata'], metadata_hash=row['metadata_hash']).execute() # link sample records to new resource records sa.Column('resource_id_new', sa.Integer).create(sample) for row in sa.select([resource]).execute(): (sample.update(). where(sa.and_( sample.c.resource_id == row['resource_id'], sample.c.user_id == row['user_id'], sample.c.project_id == row['project_id'], sample.c.source_id == row['source_id'], sample.c.metadata_hash == row['metadata_hash'])). 
values({sample.c.resource_id_new: row['internal_id']}).execute()) sample.c.resource_id.drop() sample.c.metadata_hash.drop() sample.c.resource_id_new.alter(name='resource_id') # re-bind metadata to pick up alter name change meta = sa.MetaData(bind=migrate_engine) sample = sa.Table('sample', meta, autoload=True) resource = sa.Table('resource', meta, autoload=True) if migrate_engine.name != 'sqlite': sa.Index('ix_resource_resource_id', resource.c.resource_id).create() sa.Index('ix_sample_user_id', sample.c.user_id).drop() sa.Index('ix_sample_project_id', sample.c.project_id).drop() sa.Index('ix_sample_resource_id', sample.c.resource_id).create() sa.Index('ix_sample_meter_id_resource_id', sample.c.meter_id, sample.c.resource_id).create() params = {'columns': [sample.c.resource_id], 'refcolumns': [resource.c.internal_id]} if migrate_engine.name == 'mysql': params['name'] = 'fk_sample_resource_internal_id' migrate.ForeignKeyConstraint(**params).create() sample.c.user_id.drop() sample.c.project_id.drop() sample.c.source_id.drop() sample.c.resource_metadata.drop() _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id, 'resource.internal_id') ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py0000664000567000056710000000327113072744703034360 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset.constraint import UniqueConstraint import sqlalchemy def upgrade(migrate_engine): meta = sqlalchemy.MetaData(bind=migrate_engine) event = sqlalchemy.Table('event', meta, autoload=True) message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50)) event.create_column(message_id) cons = UniqueConstraint('message_id', table=event) cons.create() index = sqlalchemy.Index('idx_event_message_id', event.c.message_id) index.create(bind=migrate_engine) # Populate the new column ... trait = sqlalchemy.Table('trait', meta, autoload=True) unique_name = sqlalchemy.Table('unique_name', meta, autoload=True) join = trait.join(unique_name, unique_name.c.id == trait.c.name_id) traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string], whereclause=(unique_name.c.key == 'message_id'), from_obj=join) for event_id, value in traits.execute(): (event.update().where(event.c.id == event_id).values(message_id=value). execute()) # Leave the Trait, makes the rollback easier and won't really hurt anyone. ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.pyceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_tex0000664000567000056710000000160013072744703035371 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alm_hist = Table('alarm_history', meta, autoload=True) alm_hist.c.detail.alter(type=Text) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py0000664000567000056710000000154513072744703034106 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alarm = Table('alarm', meta, autoload=True) severity = Column('severity', String(50)) alarm.create_column(severity) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py0000664000567000056710000000226613072744703033577 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa TABLES_012 = ['resource', 'sourceassoc', 'user', 'project', 'meter', 'source', 'alarm'] TABLES_027 = ['user', 'project', 'alarm'] def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) for table_name in TABLES_027: try: (sa.Table('dump027_' + table_name, meta, autoload=True). drop(checkfirst=True)) except sa.exc.NoSuchTableError: pass for table_name in TABLES_012: try: (sa.Table('dump_' + table_name, meta, autoload=True). 
drop(checkfirst=True)) except sa.exc.NoSuchTableError: pass ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql0000664000567000056710000000116313072744703033422 0ustar jenkinsjenkins00000000000000CREATE TABLE event_type ( id INTEGER PRIMARY KEY ASC, desc STRING NOT NULL ); INSERT INTO event_type SELECT un.id, un.key FROM unique_name un JOIN event e ON un.id = e.unique_name_id GROUP BY un.id; ALTER TABLE event RENAME TO event_orig; CREATE TABLE event ( id INTEGER PRIMARY KEY ASC, generated FLOAT NOT NULL, message_id VARCHAR(50) UNIQUE, event_type_id INTEGER NOT NULL, FOREIGN KEY (event_type_id) REFERENCES event_type (id) ); INSERT INTO event SELECT id, generated, message_id, unique_name_id FROM event_orig; DROP TABLE event_orig; DELETE FROM unique_name WHERE id IN (SELECT id FROM event_type); ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py0000664000567000056710000000423213072744703034313 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import models tables = [('trait_text', sa.String(255), True, 't_string', 1), ('trait_int', sa.Integer, False, 't_int', 2), ('trait_float', sa.Float(53), False, 't_float', 3), ('trait_datetime', models.PreciseTimestamp(), False, 't_datetime', 4)] def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) trait = sa.Table('trait', meta, autoload=True) event = sa.Table('event', meta, autoload=True) trait_type = sa.Table('trait_type', meta, autoload=True) for t_name, t_type, t_nullable, col_name, __ in tables: t_table = sa.Table( t_name, meta, sa.Column('event_id', sa.Integer, sa.ForeignKey(event.c.id), primary_key=True), sa.Column('key', sa.String(255), primary_key=True), sa.Column('value', t_type, nullable=t_nullable), sa.Index('ix_%s_event_id_key' % t_name, 'event_id', 'key'), mysql_engine='InnoDB', mysql_charset='utf8', ) t_table.create() query = sa.select( [trait.c.event_id, trait_type.c.desc, trait.c[col_name]]).select_from( trait.join(trait_type, trait.c.trait_type_id == trait_type.c.id)).where( trait.c[col_name] != sa.null()) if query.alias().select().scalar() is not None: t_table.insert().from_select( ['event_id', 'key', 'value'], query).execute() trait.drop() trait_type.drop() ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py0000664000567000056710000000157013072744703035176 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Copyright 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Float from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) meter.c.counter_volume.alter(type=Float(53)) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py0000664000567000056710000000000013072744703031405 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py0000664000567000056710000000154713072744703033452 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) duration = Column('counter_duration', Integer) meter.drop_column(duration) ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py0000664000567000056710000000000013072744703027535 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/migration.py0000664000567000056710000000172313072744703025327 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def paged(query, size=1000): """Page query results :param query: the SQLAlchemy query to execute :param size: the max page size return: generator with query data """ offset = 0 while True: page = query.offset(offset).limit(size).execute() if page.rowcount <= 0: # There are no more rows break for row in page: yield row offset += size ceilometer-6.1.5/ceilometer/storage/sqlalchemy/utils.py0000664000567000056710000001133713072744706024503 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
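# Hedged usage sketch for the paged() helper above: stream a large result
# set in pages of 1000 rows instead of loading everything at once. The
# engine URL is an assumption for illustration.
import sqlalchemy as sa

from ceilometer.storage.sqlalchemy.migration import paged

engine = sa.create_engine('sqlite:///ceilometer.db')   # hypothetical URL
meta = sa.MetaData(bind=engine)
sample = sa.Table('sample', meta, autoload=True)
query = sa.select([sample.c.id, sample.c.volume])
for row_id, volume in paged(query, size=1000):
    print(row_id, volume)   # each page is fetched lazily via OFFSET/LIMIT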
# import operator import six from sqlalchemy import and_ from sqlalchemy import asc from sqlalchemy import desc from sqlalchemy import not_ from sqlalchemy import or_ from sqlalchemy.orm import aliased import ceilometer from ceilometer.storage.sqlalchemy import models META_TYPE_MAP = {bool: models.MetaBool, str: models.MetaText, six.text_type: models.MetaText, type(None): models.MetaText, int: models.MetaBigInt, float: models.MetaFloat} if six.PY2: META_TYPE_MAP[long] = models.MetaBigInt class QueryTransformer(object): operators = {"=": operator.eq, "<": operator.lt, ">": operator.gt, "<=": operator.le, "=<": operator.le, ">=": operator.ge, "=>": operator.ge, "!=": operator.ne, "in": lambda field_name, values: field_name.in_(values), "=~": lambda field, value: field.op("regexp")(value)} # operators which are differs for different dialects dialect_operators = {'postgresql': {'=~': (lambda field, value: field.op("~")(value))}} complex_operators = {"or": or_, "and": and_, "not": not_} ordering_functions = {"asc": asc, "desc": desc} def __init__(self, table, query, dialect='mysql'): self.table = table self.query = query self.dialect_name = dialect def _get_operator(self, op): return (self.dialect_operators.get(self.dialect_name, {}).get(op) or self.operators[op]) def _handle_complex_op(self, complex_op, nodes): op = self.complex_operators[complex_op] if op == not_: nodes = [nodes] element_list = [] for node in nodes: element = self._transform(node) element_list.append(element) return op(*element_list) def _handle_simple_op(self, simple_op, nodes): op = self._get_operator(simple_op) field_name, value = list(nodes.items())[0] if field_name.startswith('resource_metadata.'): return self._handle_metadata(op, field_name, value) else: return op(getattr(self.table, field_name), value) def _handle_metadata(self, op, field_name, value): if op == self.operators["in"]: raise ceilometer.NotImplementedError('Metadata query with in ' 'operator is not implemented') field_name = field_name[len('resource_metadata.'):] meta_table = META_TYPE_MAP[type(value)] meta_alias = aliased(meta_table) on_clause = and_(self.table.internal_id == meta_alias.id, meta_alias.meta_key == field_name) # outer join is needed to support metaquery # with or operator on non existent metadata field # see: test_query_non_existing_metadata_with_result # test case. 
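# (Illustrative note, inferred from the join below: the aliased metadata
# table is LEFT OUTER JOINed on the resource internal_id plus meta_key, so
# a sample whose resource lacks the queried key still appears in the join
# and the operator is simply evaluated against a NULL value.)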
self.query = self.query.outerjoin(meta_alias, on_clause) return op(meta_alias.value, value) def _transform(self, sub_tree): operator, nodes = list(sub_tree.items())[0] if operator in self.complex_operators: return self._handle_complex_op(operator, nodes) else: return self._handle_simple_op(operator, nodes) def apply_filter(self, expression_tree): condition = self._transform(expression_tree) self.query = self.query.filter(condition) def apply_options(self, orderby, limit): self._apply_order_by(orderby) if limit is not None: self.query = self.query.limit(limit) def _apply_order_by(self, orderby): if orderby is not None: for field in orderby: attr, order = list(field.items())[0] ordering_function = self.ordering_functions[order] self.query = self.query.order_by(ordering_function( getattr(self.table, attr))) else: self.query = self.query.order_by(desc(self.table.timestamp)) def get_query(self): return self.query ceilometer-6.1.5/ceilometer/storage/sqlalchemy/__init__.py0000664000567000056710000000000013072744703025060 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/sqlalchemy/models.py0000664000567000056710000002526713072744706024635 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Ceilometer data. 
""" import hashlib import json from oslo_utils import timeutils import six from sqlalchemy import (Column, Integer, String, ForeignKey, Index, UniqueConstraint, BigInteger) from sqlalchemy import event from sqlalchemy import Float, Boolean, Text, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import deferred from sqlalchemy.orm import relationship from sqlalchemy.types import TypeDecorator from ceilometer import utils class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = Text @staticmethod def process_bind_param(value, dialect): if value is not None: value = json.dumps(value) return value @staticmethod def process_result_value(value, dialect): if value is not None: value = json.loads(value) return value class PreciseTimestamp(TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = DateTime def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(DECIMAL(precision=20, scale=6, asdecimal=True)) return self.impl @staticmethod def process_bind_param(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.dt_to_decimal(value) return value @staticmethod def process_result_value(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.decimal_to_dt(value) return value _COMMON_TABLE_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} class CeilometerBase(object): """Base class for Ceilometer Models.""" __table_args__ = _COMMON_TABLE_ARGS __table_initialized__ = False def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def update(self, values): """Make the model object behave like a dict.""" for k, v in six.iteritems(values): setattr(self, k, v) Base = declarative_base(cls=CeilometerBase) class MetaText(Base): """Metering text metadata.""" __tablename__ = 'metadata_text' __table_args__ = ( Index('ix_meta_text_key', 'meta_key'), _COMMON_TABLE_ARGS, ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Text) class MetaBool(Base): """Metering boolean metadata.""" __tablename__ = 'metadata_bool' __table_args__ = ( Index('ix_meta_bool_key', 'meta_key'), _COMMON_TABLE_ARGS, ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Boolean) class MetaBigInt(Base): """Metering integer metadata.""" __tablename__ = 'metadata_int' __table_args__ = ( Index('ix_meta_int_key', 'meta_key'), _COMMON_TABLE_ARGS, ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(BigInteger, default=False) class MetaFloat(Base): """Metering float metadata.""" __tablename__ = 'metadata_float' __table_args__ = ( Index('ix_meta_float_key', 'meta_key'), _COMMON_TABLE_ARGS, ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Float(53), default=False) class Meter(Base): """Meter definition data.""" __tablename__ = 'meter' __table_args__ = ( UniqueConstraint('name', 'type', 'unit', name='def_unique'), Index('ix_meter_name', 'name'), _COMMON_TABLE_ARGS, ) id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False) type = 
Column(String(255)) unit = Column(String(255)) samples = relationship("Sample", backref="meter") class Resource(Base): """Resource data.""" __tablename__ = 'resource' __table_args__ = ( # TODO(gordc): this should exist but the attribute values we set # for user/project/source/resource id's are too large # for a uuid. # UniqueConstraint('resource_id', 'user_id', 'project_id', # 'source_id', 'metadata_hash', # name='res_def_unique'), Index('ix_resource_resource_id', 'resource_id'), Index('ix_resource_metadata_hash', 'metadata_hash'), _COMMON_TABLE_ARGS, ) internal_id = Column(Integer, primary_key=True) user_id = Column(String(255)) project_id = Column(String(255)) source_id = Column(String(255)) resource_id = Column(String(255), nullable=False) resource_metadata = deferred(Column(JSONEncodedDict())) metadata_hash = deferred(Column(String(32))) samples = relationship("Sample", backref="resource") meta_text = relationship("MetaText", backref="resource", cascade="all, delete-orphan") meta_float = relationship("MetaFloat", backref="resource", cascade="all, delete-orphan") meta_int = relationship("MetaBigInt", backref="resource", cascade="all, delete-orphan") meta_bool = relationship("MetaBool", backref="resource", cascade="all, delete-orphan") @event.listens_for(Resource, "before_insert") def before_insert(mapper, connection, target): metadata = json.dumps(target.resource_metadata, sort_keys=True) target.metadata_hash = hashlib.md5(metadata).hexdigest() class Sample(Base): """Metering data.""" __tablename__ = 'sample' __table_args__ = ( Index('ix_sample_timestamp', 'timestamp'), Index('ix_sample_resource_id', 'resource_id'), Index('ix_sample_meter_id', 'meter_id'), Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id'), _COMMON_TABLE_ARGS, ) id = Column(Integer, primary_key=True) meter_id = Column(Integer, ForeignKey('meter.id')) resource_id = Column(Integer, ForeignKey('resource.internal_id')) volume = Column(Float(53)) timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow()) recorded_at = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow()) message_signature = Column(String(64)) message_id = Column(String(128)) class FullSample(object): """A fake model for query samples.""" id = Sample.id timestamp = Sample.timestamp message_id = Sample.message_id message_signature = Sample.message_signature recorded_at = Sample.recorded_at counter_name = Meter.name counter_type = Meter.type counter_unit = Meter.unit counter_volume = Sample.volume resource_id = Resource.resource_id source_id = Resource.source_id user_id = Resource.user_id project_id = Resource.project_id resource_metadata = Resource.resource_metadata internal_id = Resource.internal_id class EventType(Base): """Types of event records.""" __tablename__ = 'event_type' id = Column(Integer, primary_key=True) desc = Column(String(255), unique=True) def __init__(self, event_type): self.desc = event_type def __repr__(self): return "" % self.desc class Event(Base): __tablename__ = 'event' __table_args__ = ( Index('ix_event_message_id', 'message_id'), Index('ix_event_type_id', 'event_type_id'), Index('ix_event_generated', 'generated'), _COMMON_TABLE_ARGS, ) id = Column(Integer, primary_key=True) message_id = Column(String(50), unique=True) generated = Column(PreciseTimestamp()) raw = deferred(Column(JSONEncodedDict())) event_type_id = Column(Integer, ForeignKey('event_type.id')) event_type = relationship("EventType", backref='events') def __init__(self, message_id, event_type, generated, raw): self.message_id = 
message_id self.event_type = event_type self.generated = generated self.raw = raw def __repr__(self): return "" % (self.id, self.message_id, self.event_type, self.generated) class TraitText(Base): """Event text traits.""" __tablename__ = 'trait_text' __table_args__ = ( Index('ix_trait_text_event_id_key', 'event_id', 'key'), _COMMON_TABLE_ARGS, ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(String(255)) class TraitInt(Base): """Event integer traits.""" __tablename__ = 'trait_int' __table_args__ = ( Index('ix_trait_int_event_id_key', 'event_id', 'key'), _COMMON_TABLE_ARGS, ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(Integer) class TraitFloat(Base): """Event float traits.""" __tablename__ = 'trait_float' __table_args__ = ( Index('ix_trait_float_event_id_key', 'event_id', 'key'), _COMMON_TABLE_ARGS, ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(Float(53)) class TraitDatetime(Base): """Event datetime traits.""" __tablename__ = 'trait_datetime' __table_args__ = ( Index('ix_trait_datetime_event_id_key', 'event_id', 'key'), _COMMON_TABLE_ARGS, ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(PreciseTimestamp()) ceilometer-6.1.5/ceilometer/storage/base.py0000664000567000056710000002140313072744706022106 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for storage engines """ import datetime import inspect import math from oslo_utils import timeutils import six from six import moves import ceilometer def iter_period(start, end, period): """Split a time from start to end in periods of a number of seconds. This function yields the (start, end) time for each period composing the time passed as argument. :param start: When the period set start. :param end: When the period end starts. :param period: The duration of the period. """ period_start = start increment = datetime.timedelta(seconds=period) for i in moves.xrange(int(math.ceil( timeutils.delta_seconds(start, end) / float(period)))): next_start = period_start + increment yield (period_start, next_start) period_start = next_start def _handle_sort_key(model_name, sort_key=None): """Generate sort keys according to the passed in sort key from user. :param model_name: Database model name be query.(meter, etc.) :param sort_key: sort key passed from user. return: sort keys list """ sort_keys_extra = {'meter': ['user_id', 'project_id'], 'resource': ['user_id', 'project_id', 'timestamp'], } sort_keys = sort_keys_extra[model_name] if not sort_key: return sort_keys # NOTE(Fengqian): We need to put the sort key from user # in the first place of sort keys list. 
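# For example (illustrative): _handle_sort_key('meter', 'project_id')
# returns ['project_id', 'user_id'], while _handle_sort_key('resource')
# just returns the defaults ['user_id', 'project_id', 'timestamp'].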
try: sort_keys.remove(sort_key) except ValueError: pass finally: sort_keys.insert(0, sort_key) return sort_keys class MultipleResultsFound(Exception): pass class NoResultFound(Exception): pass class Model(object): """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in six.iteritems(kwds): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() @classmethod def get_field_names(cls): fields = inspect.getargspec(cls.__init__)[0] return set(fields) - set(["self"]) class Connection(object): """Base class for storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'meters': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'resources': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'samples': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'statistics': {'groupby': False, 'query': {'simple': False, 'metadata': False, 'complex': False}, 'aggregation': {'standard': False, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, url): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def record_metering_data(data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter All timestamps must be naive utc datetime object. """ raise ceilometer.NotImplementedError( 'Recording metering data is not implemented') @staticmethod def clear_expired_metering_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ raise ceilometer.NotImplementedError( 'Clearing samples not implemented') @staticmethod def get_resources(user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances. Iterable items containing resource information. :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional timestamp start range operation. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional timestamp end range operation. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ raise ceilometer.NotImplementedError('Resources not implemented') @staticmethod def get_meters(user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of model.Meter instances. Iterable items containing meter information. :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. 
:param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ raise ceilometer.NotImplementedError('Meters not implemented') @staticmethod def get_samples(sample_filter, limit=None): """Return an iterable of model.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. """ raise ceilometer.NotImplementedError('Samples not implemented') @staticmethod def get_meter_statistics(sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of model.Statistics instances. The filter must have a meter value set. """ raise ceilometer.NotImplementedError('Statistics not implemented') @staticmethod def clear(): """Clear database.""" @staticmethod def query_samples(filter_expr=None, orderby=None, limit=None): """Return an iterable of model.Sample objects. :param filter_expr: Filter expression for query. :param orderby: List of field name and direction pairs for order by. :param limit: Maximum number of results to return. """ raise ceilometer.NotImplementedError('Complex query for samples ' 'is not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES ceilometer-6.1.5/ceilometer/storage/hbase/0000775000567000056710000000000013072745164021703 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/hbase/base.py0000664000567000056710000000630113072744706023170 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import happybase from oslo_log import log from oslo_utils import netutils from six.moves.urllib import parse as urlparse from ceilometer.storage.hbase import inmemory as hbase_inmemory LOG = log.getLogger(__name__) class Connection(object): """Base connection class for HBase.""" _memory_instance = None def __init__(self, url): """Hbase Connection Initialization.""" opts = self._parse_connection_url(url) if opts['host'] == '__test__': url = os.environ.get('CEILOMETER_TEST_HBASE_URL') if url: # Reparse URL, but from the env variable now opts = self._parse_connection_url(url) self.conn_pool = self._get_connection_pool(opts) else: # This is a in-memory usage for unit tests if Connection._memory_instance is None: LOG.debug('Creating a new in-memory HBase ' 'Connection object') Connection._memory_instance = (hbase_inmemory. MConnectionPool()) self.conn_pool = Connection._memory_instance else: self.conn_pool = self._get_connection_pool(opts) @staticmethod def _get_connection_pool(conf): """Return a connection pool to the database. .. note:: The tests use a subclass to override this and return an in-memory connection pool. 
""" LOG.debug('connecting to HBase on %(host)s:%(port)s', {'host': conf['host'], 'port': conf['port']}) return happybase.ConnectionPool( size=100, host=conf['host'], port=conf['port'], table_prefix=conf['table_prefix'], table_prefix_separator=conf['table_prefix_separator']) @staticmethod def _parse_connection_url(url): """Parse connection parameters from a database url. .. note:: HBase Thrift does not support authentication and there is no database name, so we are not looking for these in the url. """ opts = {} result = netutils.urlsplit(url) opts['table_prefix'] = urlparse.parse_qs( result.query).get('table_prefix', [None])[0] opts['table_prefix_separator'] = urlparse.parse_qs( result.query).get('table_prefix_separator', ['_'])[0] opts['dbtype'] = result.scheme if ':' in result.netloc: opts['host'], port = result.netloc.split(':') else: opts['host'] = result.netloc port = 9090 opts['port'] = port and int(port) or 9090 return opts ceilometer-6.1.5/ceilometer/storage/hbase/migration.py0000664000567000056710000000734413072744706024257 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HBase storage backend migrations """ import re from ceilometer.storage.hbase import utils as hbase_utils def migrate_resource_table(conn, table): """Migrate table 'resource' in HBase. Change qualifiers format from "%s+%s+%s!%s!%s" % (rts, source, counter_name, counter_type,counter_unit) in columns with meters f:m_* to new separator format "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type,counter_unit) """ resource_table = conn.table(table) resource_filter = ("QualifierFilter(=, " "'regexstring:m_\\d{19}\\+" "[\\w-\\._]*\\+[\\w-\\._!]')") gen = resource_table.scan(filter=resource_filter) for row, data in gen: columns = [] updated_columns = dict() column_prefix = "f:" for column, value in data.items(): if column.startswith('f:m_'): columns.append(column) parts = column[2:].split("+", 2) parts.extend(parts.pop(2).split("!")) column = hbase_utils.prepare_key(*parts) updated_columns[column_prefix + column] = value resource_table.put(row, updated_columns) resource_table.delete(row, columns) def migrate_meter_table(conn, table): """Migrate table 'meter' in HBase. Change row format from "%s_%d_%s" % (counter_name, rts, message_signature) to new separator format "%s:%s:%s" % (counter_name, rts, message_signature) """ meter_table = conn.table(table) meter_filter = ("RowFilter(=, " "'regexstring:[\\w\\._-]*_\\d{19}_\\w*')") gen = meter_table.scan(filter=meter_filter) for row, data in gen: parts = row.rsplit('_', 2) new_row = hbase_utils.prepare_key(*parts) meter_table.put(new_row, data) meter_table.delete(row) def migrate_event_table(conn, table): """Migrate table 'event' in HBase. 
Change row format from ""%d_%s" % timestamp, event_id, to new separator format "%s:%s" % timestamp, event_id Also change trait columns from %s+%s % trait.name, trait.dtype to %s:%s % trait.name, trait.dtype """ event_table = conn.table(table) event_filter = "RowFilter(=, 'regexstring:\\d*_\\w*')" gen = event_table.scan(filter=event_filter) trait_pattern = re.compile("f:[\w\-_]*\+\w") column_prefix = "f:" for row, data in gen: row_parts = row.split("_", 1) update_data = {} for column, value in data.items(): if trait_pattern.match(column): trait_parts = column[2:].rsplit('+', 1) column = hbase_utils.prepare_key(*trait_parts) update_data[column_prefix + column] = value new_row = hbase_utils.prepare_key(*row_parts) event_table.put(new_row, update_data) event_table.delete(row) TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table, 'meter': migrate_meter_table, 'event': migrate_event_table} def migrate_tables(conn, tables): if type(tables) is not list: tables = [tables] for table in tables: if table in TABLE_MIGRATION_FUNCS: TABLE_MIGRATION_FUNCS.get(table)(conn, table) ceilometer-6.1.5/ceilometer/storage/hbase/inmemory.py0000664000567000056710000002252213072744706024120 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is a very crude version of "in-memory HBase", which implements just enough functionality of HappyBase API to support testing of our driver. """ import copy import re from oslo_log import log import six import ceilometer LOG = log.getLogger(__name__) class MTable(object): """HappyBase.Table mock.""" def __init__(self, name, families): self.name = name self.families = families self._rows_with_ts = {} def row(self, key, columns=None): if key not in self._rows_with_ts: return {} res = copy.copy(sorted(six.iteritems( self._rows_with_ts.get(key)))[-1][1]) if columns: keys = res.keys() for key in keys: if key not in columns: res.pop(key) return res def rows(self, keys): return ((k, self.row(k)) for k in keys) def put(self, key, data, ts=None): # Note: Now we use 'timestamped' but only for one Resource table. # That's why we may put ts='0' in case when ts is None. If it is # needed to use 2 types of put in one table ts=0 cannot be used. if ts is None: ts = "0" if key not in self._rows_with_ts: self._rows_with_ts[key] = {ts: data} else: if ts in self._rows_with_ts[key]: self._rows_with_ts[key][ts].update(data) else: self._rows_with_ts[key].update({ts: data}) def delete(self, key): del self._rows_with_ts[key] def _get_latest_dict(self, row): # The idea here is to return latest versions of columns. # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}. # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})] # sorted by ts, i.e. in this list ts_2 is the most latest. 
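# For example (illustrative): {'ts1': {'f:a': 'old'}, 'ts2': {'f:a': 'new',
# 'f:b': 'z'}} merges to {'f:a': 'new', 'f:b': 'z'} -- later timestamps
# win on key collisions.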
# To get result as HBase provides we should iterate in reverse order # and get from "latest" data only key-values that are not in newer data data = {} for i in sorted(six.iteritems(self._rows_with_ts[row])): data.update(i[1]) return data def scan(self, filter=None, columns=None, row_start=None, row_stop=None, limit=None): columns = columns or [] sorted_keys = sorted(self._rows_with_ts) # copy data between row_start and row_stop into a dict rows = {} for row in sorted_keys: if row_start and row < row_start: continue if row_stop and row > row_stop: break rows[row] = self._get_latest_dict(row) if columns: ret = {} for row, data in six.iteritems(rows): for key in data: if key in columns: ret[row] = data rows = ret if filter: # TODO(jdanjou): we should really parse this properly, # but at the moment we are only going to support AND here filters = filter.split('AND') for f in filters: # Extract filter name and its arguments g = re.search("(.*)\((.*),?\)", f) fname = g.group(1).strip() fargs = [s.strip().replace('\'', '') for s in g.group(2).split(',')] m = getattr(self, fname) if callable(m): # overwrite rows for filtering to take effect # in case of multiple filters rows = m(fargs, rows) else: raise ceilometer.NotImplementedError( "%s filter is not implemented, " "you may want to add it!") for k in sorted(rows)[:limit]: yield k, rows[k] @staticmethod def SingleColumnValueFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'SingleColumnValueFilter' is found in the 'filter' argument. """ op = args[2] column = "%s:%s" % (args[0], args[1]) value = args[3] if value.startswith('binary:'): value = value[7:] r = {} for row in rows: data = rows[row] if op == '=': if column in data and data[column] == value: r[row] = data elif op == '<': if column in data and data[column] < value: r[row] = data elif op == '<=': if column in data and data[column] <= value: r[row] = data elif op == '>': if column in data and data[column] > value: r[row] = data elif op == '>=': if column in data and data[column] >= value: r[row] = data elif op == '!=': if column in data and data[column] != value: r[row] = data return r @staticmethod def ColumnPrefixFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'ColumnPrefixFilter' is found in the 'filter' argument. :param args: a list of filter arguments, contain prefix of column :param rows: a dict of row prefixes for filtering """ value = args[0] column = 'f:' + value r = {} for row, data in rows.items(): column_dict = {} for key in data: if key.startswith(column): column_dict[key] = data[key] r[row] = column_dict return r @staticmethod def RowFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'RowFilter' is found in the 'filter' argument. :param args: a list of filter arguments, it contains operator and sought string :param rows: a dict of rows which are filtered """ op = args[0] value = args[1] if value.startswith('regexstring:'): value = value[len('regexstring:'):] r = {} for row, data in rows.items(): try: g = re.search(value, row).group() if op == '=': if g == row: r[row] = data else: raise ceilometer.NotImplementedError( "In-memory " "RowFilter doesn't support " "the %s operation yet" % op) except AttributeError: pass return r @staticmethod def QualifierFilter(args, rows): """This is filter for testing "in-memory HBase". 
        This method is called from scan() when 'QualifierFilter' is found
        in the 'filter' argument.
        """
        op = args[0]
        value = args[1]
        is_regex = False
        if value.startswith('binaryprefix:'):
            value = value[len('binaryprefix:'):]
        if value.startswith('regexstring:'):
            value = value[len('regexstring:'):]
            is_regex = True
        column = 'f:' + value
        r = {}
        for row in rows:
            data = rows[row]
            r_data = {}
            for key in data:
                if op not in ('=', '<', '<=', '>', '>=') and not is_regex:
                    # Only raise for genuinely unsupported operations; a
                    # key that simply does not match the filter is skipped.
                    raise ceilometer.NotImplementedError(
                        "In-memory QualifierFilter "
                        "doesn't support the %s "
                        "operation yet" % op)
                if ((op == '=' and key.startswith(column)) or
                        (op == '>=' and key >= column) or
                        (op == '<=' and key <= column) or
                        (op == '>' and key > column) or
                        (op == '<' and key < column) or
                        (is_regex and re.search(value, key))):
                    r_data[key] = data[key]
            if r_data:
                r[row] = r_data
        return r


class MConnectionPool(object):
    def __init__(self):
        self.conn = MConnection()

    def connection(self):
        return self.conn


class MConnection(object):
    """HappyBase.Connection mock."""

    def __init__(self):
        self.tables = {}

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    @staticmethod
    def open():
        LOG.debug("Opening in-memory HBase connection")

    def create_table(self, n, families=None):
        families = families or {}
        if n in self.tables:
            return self.tables[n]
        t = MTable(n, families)
        self.tables[n] = t
        return t

    def delete_table(self, name, use_prefix=True):
        del self.tables[name]

    def table(self, name):
        return self.create_table(name)
ceilometer-6.1.5/ceilometer/storage/hbase/utils.py0000664000567000056710000004342113072744706023422 0ustar jenkinsjenkins00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Various HBase helpers"""

import copy
import datetime
import json

import bson.json_util
from happybase.hbase import ttypes
from oslo_log import log
import six

from ceilometer.i18n import _
from ceilometer import utils

LOG = log.getLogger(__name__)

EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>',
           'ge': '>='}
# We need this additional dictionary because timestamps are stored
# reversed in the row keys of the metric tables.
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
               'ge': '<='}


def _QualifierFilter(op, qualifier):
    return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)


def timestamp(dt, reverse=True):
    """Timestamp is the count of microseconds since the start of the epoch.

    If reverse=True the timestamp will be reversed. This technique is used
    in HBase rowkey design when period queries are required. Because rows
    are sorted lexicographically, it makes it possible to choose whether
    the 'oldest' entries or the newest ones (reversed timestamp case) end
    up at the top of the table.
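
    For example (values follow from the formula below)::

        >>> timestamp(datetime.datetime(1970, 1, 1), reverse=False)
        0
        >>> timestamp(datetime.datetime(1970, 1, 1))
        9223372036854775807
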
    :param dt: datetime which is translated to a timestamp
    :param reverse: a boolean parameter for a reversed or straight count of
      the timestamp in microseconds
    :return: count or reversed count of microseconds since the start of
      the epoch
    """
    epoch = datetime.datetime(1970, 1, 1)
    td = dt - epoch
    ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
    return 0x7fffffffffffffff - ts if reverse else ts


def make_events_query_from_filter(event_filter):
    """Return start and stop rows for filtering and a query.

    The query is based on the selected parameters.

    :param event_filter: storage.EventFilter object.
    """
    start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
                    if event_filter.start_timestamp else "")
    stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
                   if event_filter.end_timestamp else "")
    kwargs = {'event_type': event_filter.event_type,
              'event_id': event_filter.message_id}
    res_q = make_query(**kwargs)

    if event_filter.traits_filter:
        for trait_filter in event_filter.traits_filter:
            q_trait = make_query(trait_query=True, **trait_filter)
            if q_trait:
                if res_q:
                    res_q += " AND " + q_trait
                else:
                    res_q = q_trait
    return res_q, start, stop


def make_timestamp_query(func, start=None, start_op=None, end=None,
                         end_op=None, bounds_only=False, **kwargs):
    """Return start and stop rows for filtering and a query.

    The query is based on the fact that the CF-name is 'rts'.

    :param start: Optional start timestamp
    :param start_op: Optional start timestamp operator, like gt, ge
    :param end: Optional end timestamp
    :param end_op: Optional end timestamp operator, like lt, le
    :param bounds_only: if True then the query will not be returned
    :param func: a function that provides the row format
    :param kwargs: kwargs for func
    """
    # We don't need to dump here because get_start_end_rts returns strings
    rts_start, rts_end = get_start_end_rts(start, end)
    start_row, end_row = func(rts_start, rts_end, **kwargs)

    if bounds_only:
        return start_row, end_row

    q = []
    start_op = start_op or 'ge'
    end_op = end_op or 'lt'
    if rts_start:
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[start_op], rts_start))
    if rts_end:
        q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
                 (OP_SIGN_REV[end_op], rts_end))

    res_q = None
    if len(q):
        res_q = " AND ".join(q)

    return start_row, end_row, res_q


def get_start_end_rts(start, end):
    rts_start = str(timestamp(start)) if start else ""
    rts_end = str(timestamp(end)) if end else ""
    return rts_start, rts_end


def make_query(metaquery=None, trait_query=None, **kwargs):
    """Return a filter query string based on the selected parameters.

    :param metaquery: optional metaquery dict
    :param trait_query: optional boolean, for a trait query built from
      kwargs
    :param kwargs: key-value pairs to filter on. The key should be a real
      column name in the db
    """
    q = []
    res_q = None

    # A query for traits differs from the others: it is constructed with
    # SingleColumnValueFilter and allows choosing the comparison operator.
    if trait_query:
        trait_name = kwargs.pop('key')
        op = kwargs.pop('op', 'eq')
        for k, v in kwargs.items():
            if v is not None:
                res_q = ("SingleColumnValueFilter "
                         "('f', '%s', %s, 'binary:%s', true, true)" %
                         (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
                          OP_SIGN[op], dump(v)))
        return res_q

    # Note: we use the extended constructor for SingleColumnValueFilter
    # here. It explicitly specifies that an entry should not be returned
    # if the CF is not found in the table.
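    # For illustration (hypothetical value): make_query(counter_name='cpu')
    # produces
    #     "SingleColumnValueFilter ('f', 'counter_name', =,
    #      'binary:"cpu"', true, true)"
    # because dump() JSON-encodes the value before it is embedded.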
for key, value in sorted(kwargs.items()): if value is not None: if key == 'source': q.append("SingleColumnValueFilter " "('f', 's_%s', =, 'binary:%s', true, true)" % (value, dump('1'))) elif key == 'trait_type': q.append("ColumnPrefixFilter('%s')" % value) elif key == 'event_id': q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value) else: q.append("SingleColumnValueFilter " "('f', '%s', =, 'binary:%s', true, true)" % (quote(key), dump(value))) res_q = None if len(q): res_q = " AND ".join(q) if metaquery: meta_q = [] for k, v in metaquery.items(): meta_q.append( "SingleColumnValueFilter ('f', '%s', =, 'binary:%s', " "true, true)" % ('r_' + k, dump(v))) meta_q = " AND ".join(meta_q) # join query and metaquery if res_q is not None: res_q += " AND " + meta_q else: res_q = meta_q # metaquery only return res_q def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs): """Return a list of required columns in meter table to be scanned. SingleColumnFilter has 'columns' filter that should be used to determine what columns we are interested in. But if we want to use 'filter' and 'columns' together we have to include columns we are filtering by to columns list. Please see an example: If we make scan with filter "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')" and columns ['f:rts'], the output will be always empty because only 'rts' will be returned and filter will be applied to this data so 's_test-1' cannot be find. To make this request correct it should be fixed as follows: filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')", columns = ['f:rts','f:s_test-1']} :param metaquery: optional metaquery dict :param need_timestamp: flag, which defines the need for timestamp columns :param kwargs: key-value pairs to filter on. Key should be a real column name in db """ columns = ['f:message', 'f:recorded_at'] columns.extend("f:%s" % k for k, v in kwargs.items() if v is not None) if metaquery: columns.extend("f:r_%s" % k for k, v in metaquery.items() if v is not None) source = kwargs.get('source') if source: columns.append("f:s_%s" % source) if need_timestamp: columns.extend(['f:rts', 'f:timestamp']) return columns def make_sample_query_from_filter(sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param sample_filter: SampleFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. 
""" meter = sample_filter.meter if not meter and require_meter: raise RuntimeError('Missing required meter specifier') start_row, end_row, ts_query = make_timestamp_query( make_general_rowkey_scan, start=sample_filter.start_timestamp, start_op=sample_filter.start_timestamp_op, end=sample_filter.end_timestamp, end_op=sample_filter.end_timestamp_op, some_id=meter) kwargs = dict(user_id=sample_filter.user, project_id=sample_filter.project, counter_name=meter, resource_id=sample_filter.resource, source=sample_filter.source, message_id=sample_filter.message_id) q = make_query(metaquery=sample_filter.metaquery, **kwargs) if q: res_q = q + " AND " + ts_query if ts_query else q else: res_q = ts_query if ts_query else None need_timestamp = (sample_filter.start_timestamp or sample_filter.end_timestamp) is not None columns = get_meter_columns(metaquery=sample_filter.metaquery, need_timestamp=need_timestamp, **kwargs) return res_q, start_row, end_row, columns def make_meter_query_for_resource(start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, source, query=None): """This method is used when Resource table should be filtered by meters. In this method we are looking into all qualifiers with m_ prefix. :param start_timestamp: meter's timestamp start range. :param start_timestamp_op: meter's start time operator, like ge, gt. :param end_timestamp: meter's timestamp end range. :param end_timestamp_op: meter's end time operator, like lt, le. :param source: source filter. :param query: a query string to concatenate with. """ start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp) mq = [] start_op = start_timestamp_op or 'ge' end_op = end_timestamp_op or 'lt' if start_rts: filter_value = (start_rts + ':' + quote(source) if source else start_rts) mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value)) if end_rts: filter_value = (end_rts + ':' + quote(source) if source else end_rts) mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value)) if mq: meter_q = " AND ".join(mq) # If there is a filtering on time_range we need to point that # qualifiers should start with m_. Overwise in case e.g. # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808') # qualifier 's_test' satisfies the filter and will be returned. meter_q = _QualifierFilter("=", '') + " AND " + meter_q query = meter_q if not query else query + " AND " + meter_q return query def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): """If it's filter on some_id without start and end. start_row = some_id while end_row = some_id + MAX_BYTE. """ if some_id is None: return None, None if not rts_start: # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123 # will be quoted and character will be turn in a composition that is # started with '%' (chr(37)) that lexicographically is less then chr # of number rts_start = chr(122) end_row = prepare_key(some_id, rts_start) start_row = prepare_key(some_id, rts_end) return start_row, end_row def prepare_key(*args): """Prepares names for rows and columns with correct separator. 
:param args: strings or numbers that we want our key construct of :return: key with quoted args that are separated with character ":" """ key_quote = [] for key in args: if isinstance(key, six.integer_types): key = str(key) key_quote.append(quote(key)) return ":".join(key_quote) def timestamp_from_record_tuple(record): """Extract timestamp from HBase tuple record.""" return record[0]['timestamp'] def resource_id_from_record_tuple(record): """Extract resource_id from HBase tuple record.""" return record[0]['resource_id'] def deserialize_entry(entry, get_raw_meta=True): """Return a list of flatten_result, sources, meters and metadata. Flatten_result contains a dict of simple structures such as 'resource_id':1 sources/meters are the lists of sources and meters correspondingly. metadata is metadata dict. This dict may be returned as flattened if get_raw_meta is False. :param entry: entry from HBase, without row name and timestamp :param get_raw_meta: If true then raw metadata will be returned, if False metadata will be constructed from 'f:r_metadata.' fields """ flatten_result = {} sources = [] meters = [] metadata_flattened = {} for k, v in entry.items(): if k.startswith('f:s_'): sources.append(decode_unicode(k[4:])) elif k.startswith('f:r_metadata.'): qualifier = decode_unicode(k[len('f:r_metadata.'):]) metadata_flattened[qualifier] = load(v) elif k.startswith("f:m_"): meter = ([unquote(i) for i in k[4:].split(':')], load(v)) meters.append(meter) else: if ':' in k[2:]: key = tuple([unquote(i) for i in k[2:].split(':')]) else: key = unquote(k[2:]) flatten_result[key] = load(v) if get_raw_meta: metadata = flatten_result.get('resource_metadata', {}) else: metadata = metadata_flattened return flatten_result, meters, metadata def serialize_entry(data=None, **kwargs): """Return a dict that is ready to be stored to HBase :param data: dict to be serialized :param kwargs: additional args """ data = data or {} entry_dict = copy.copy(data) entry_dict.update(**kwargs) result = {} for k, v in entry_dict.items(): if k == 'source': # user, project and resource tables may contain several sources. # Besides, resource table may contain several meters. # To make insertion safe we need to store all meters and sources in # a separate cell. For this purpose s_ and m_ prefixes are # introduced. qualifier = encode_unicode('f:s_%s' % v) result[qualifier] = dump('1') elif k == 'meter': for meter, ts in v.items(): qualifier = encode_unicode('f:m_%s' % meter) result[qualifier] = dump(ts) elif k == 'resource_metadata': # keep raw metadata as well as flattened to provide # capability with API v2. It will be flattened in another # way on API level. But we need flattened too for quick filtering. flattened_meta = dump_metadata(v) for key, m in flattened_meta.items(): metadata_qualifier = encode_unicode('f:r_metadata.' 
+ key) result[metadata_qualifier] = dump(m) result['f:resource_metadata'] = dump(v) else: result['f:' + quote(k, ':')] = dump(v) return result def dump_metadata(meta): resource_metadata = {} for key, v in utils.dict_to_keyval(meta): resource_metadata[key] = v return resource_metadata def dump(data): return json.dumps(data, default=bson.json_util.default) def load(data): return json.loads(data, object_hook=object_hook) def encode_unicode(data): return data.encode('utf-8') if isinstance(data, six.text_type) else data def decode_unicode(data): return data.decode('utf-8') if isinstance(data, six.string_types) else data # We don't want to have tzinfo in decoded json.This object_hook is # overwritten json_util.object_hook for $date def object_hook(dct): if "$date" in dct: dt = bson.json_util.object_hook(dct) return dt.replace(tzinfo=None) return bson.json_util.object_hook(dct) def create_tables(conn, tables, column_families): for table in tables: try: conn.create_table(table, column_families) except ttypes.AlreadyExists: if conn.table_prefix: table = ("%(table_prefix)s" "%(separator)s" "%(table_name)s" % dict(table_prefix=conn.table_prefix, separator=conn.table_prefix_separator, table_name=table)) LOG.warning(_("Cannot create table %(table_name)s " "it already exists. Ignoring error") % {'table_name': table}) def quote(s, *args): """Return quoted string even if it is unicode one. :param s: string that should be quoted :param args: any symbol we want to stay unquoted """ s_en = s.encode('utf8') return six.moves.urllib.parse.quote(s_en, *args) def unquote(s): """Return unquoted and decoded string. :param s: string that should be unquoted """ s_de = six.moves.urllib.parse.unquote(s) return s_de.decode('utf8') ceilometer-6.1.5/ceilometer/storage/hbase/__init__.py0000664000567000056710000000000013072744703024000 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/pymongo_base.py0000664000567000056710000001531613072744706023664 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common functions for MongoDB and DB2 backends """ import pymongo from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer import utils COMMON_AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base Connection class for MongoDB and DB2 drivers.""" CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} q = {} if user == 'None': q['user_id'] = None elif user is not None: q['user_id'] = user if project == 'None': q['project_id'] = None elif project is not None: q['project_id'] = project if resource == 'None': q['_id'] = None elif resource is not None: q['_id'] = resource if source is not None: q['source'] = source q.update(metaquery) count = 0 if unique: meter_names = set() for r in self.db.resource.find(q): for r_meter in r['meter']: if unique: if r_meter['counter_name'] in meter_names: continue else: meter_names.add(r_meter['counter_name']) if limit and count >= limit: return else: count += 1 if unique: yield models.Meter( name=r_meter['counter_name'], type=r_meter['counter_type'], # Return empty string if 'counter_unit' is not valid # for backward compatibility. unit=r_meter.get('counter_unit', ''), resource_id=None, project_id=None, source=None, user_id=None) else: yield models.Meter( name=r_meter['counter_name'], type=r_meter['counter_type'], # Return empty string if 'counter_unit' is not valid # for backward compatibility. unit=r_meter.get('counter_unit', ''), resource_id=r['_id'], project_id=r['project_id'], source=r['source'], user_id=r['user_id']) def get_samples(self, sample_filter, limit=None): """Return an iterable of model.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. 
""" if limit == 0: return [] q = pymongo_utils.make_query_from_filter(sample_filter, require_meter=False) return self._retrieve_samples(q, [("timestamp", pymongo.DESCENDING)], limit) def query_samples(self, filter_expr=None, orderby=None, limit=None): if limit == 0: return [] query_filter = {} orderby_filter = [("timestamp", pymongo.DESCENDING)] transformer = pymongo_utils.QueryTransformer() if orderby is not None: orderby_filter = transformer.transform_orderby(orderby) if filter_expr is not None: query_filter = transformer.transform_filter(filter_expr) return self._retrieve_samples(query_filter, orderby_filter, limit) def _retrieve_samples(self, query, orderby, limit): if limit is not None: samples = self.db.meter.find(query, limit=limit, sort=orderby) else: samples = self.db.meter.find(query, sort=orderby) for s in samples: # Remove the ObjectId generated by the database when # the sample was inserted. It is an implementation # detail that should not leak outside of the driver. del s['_id'] # Backward compatibility for samples without units s['counter_unit'] = s.get('counter_unit', '') # Compatibility with MongoDB 3.+ s['counter_volume'] = float(s.get('counter_volume')) # Tolerate absence of recorded_at in older datapoints s['recorded_at'] = s.get('recorded_at') # Check samples for metadata and "unquote" key if initially it # was started with '$'. if s.get('resource_metadata'): s['resource_metadata'] = pymongo_utils.unquote_keys( s.get('resource_metadata')) yield models.Sample(**s) ceilometer-6.1.5/ceilometer/storage/impl_sqlalchemy.py0000664000567000056710000010763513072744706024373 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""SQLAlchemy storage backend.""" from __future__ import absolute_import import datetime import hashlib import os from oslo_config import cfg from oslo_db import api from oslo_db import exception as dbexc from oslo_db.sqlalchemy import session as db_session from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils import six import sqlalchemy as sa from sqlalchemy import and_ from sqlalchemy import distinct from sqlalchemy import func from sqlalchemy.orm import aliased from sqlalchemy.sql.expression import cast import ceilometer from ceilometer.i18n import _, _LI from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models as api_models from ceilometer.storage.sqlalchemy import models from ceilometer.storage.sqlalchemy import utils as sql_utils from ceilometer import utils LOG = log.getLogger(__name__) STANDARD_AGGREGATES = dict( avg=func.avg(models.Sample.volume).label('avg'), sum=func.sum(models.Sample.volume).label('sum'), min=func.min(models.Sample.volume).label('min'), max=func.max(models.Sample.volume).label('max'), count=func.count(models.Sample.volume).label('count') ) UNPARAMETERIZED_AGGREGATES = dict( stddev=func.stddev_pop(models.Sample.volume).label('stddev') ) PARAMETERIZED_AGGREGATES = dict( validate=dict( cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id'] ), compute=dict( cardinality=lambda p: func.count( distinct(getattr(models.Resource, p)) ).label('cardinality/%s' % p) ) ) AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'resources': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True}} }, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } def apply_metaquery_filter(session, query, metaquery): """Apply provided metaquery filter to existing query. :param session: session used for original query :param query: Query instance :param metaquery: dict with metadata to match on. """ for k, value in six.iteritems(metaquery): key = k[9:] # strip out 'metadata.' prefix try: _model = sql_utils.META_TYPE_MAP[type(value)] except KeyError: raise ceilometer.NotImplementedError( 'Query on %(key)s is of %(value)s ' 'type and is not supported' % {"key": k, "value": type(value)}) else: meta_alias = aliased(_model) on_clause = and_(models.Resource.internal_id == meta_alias.id, meta_alias.meta_key == key) # outer join is needed to support metaquery # with or operator on non existent metadata field # see: test_query_non_existing_metadata_with_result # test case. query = query.outerjoin(meta_alias, on_clause) query = query.filter(meta_alias.value == value) return query def make_query_from_filter(session, query, sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param session: session used for original query :param query: Query instance :param sample_filter: SampleFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. 
""" if sample_filter.meter: query = query.filter(models.Meter.name == sample_filter.meter) elif require_meter: raise RuntimeError('Missing required meter specifier') if sample_filter.source: query = query.filter( models.Resource.source_id == sample_filter.source) if sample_filter.start_timestamp: ts_start = sample_filter.start_timestamp if sample_filter.start_timestamp_op == 'gt': query = query.filter(models.Sample.timestamp > ts_start) else: query = query.filter(models.Sample.timestamp >= ts_start) if sample_filter.end_timestamp: ts_end = sample_filter.end_timestamp if sample_filter.end_timestamp_op == 'le': query = query.filter(models.Sample.timestamp <= ts_end) else: query = query.filter(models.Sample.timestamp < ts_end) if sample_filter.user: if sample_filter.user == 'None': sample_filter.user = None query = query.filter(models.Resource.user_id == sample_filter.user) if sample_filter.project: if sample_filter.project == 'None': sample_filter.project = None query = query.filter( models.Resource.project_id == sample_filter.project) if sample_filter.resource: query = query.filter( models.Resource.resource_id == sample_filter.resource) if sample_filter.message_id: query = query.filter( models.Sample.message_id == sample_filter.message_id) if sample_filter.metaquery: query = apply_metaquery_filter(session, query, sample_filter.metaquery) return query class Connection(base.Connection): """Put the data into a SQLAlchemy database. Tables:: - meter - meter definition - { id: meter id name: meter name type: meter type unit: meter unit } - resource - resource definition - { internal_id: resource id resource_id: resource uuid user_id: user uuid project_id: project uuid source_id: source id resource_metadata: metadata dictionary metadata_hash: metadata dictionary hash } - sample - the raw incoming data - { id: sample id meter_id: meter id (->meter.id) resource_id: resource id (->resource.internal_id) volume: sample volume timestamp: datetime recorded_at: datetime message_signature: message signature message_id: message uuid } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, url): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function options = dict(cfg.CONF.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Ceilometer for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): # NOTE(gordc): to minimise memory, only import migration when needed from oslo_db.sqlalchemy import migration path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sqlalchemy', 'migrate_repo') engine = self._engine_facade.get_engine() from migrate import exceptions as migrate_exc from migrate.versioning import api from migrate.versioning import repository repo = repository.Repository(path) try: api.db_version(engine, repo) except migrate_exc.DatabaseNotControlledError: models.Base.metadata.create_all(engine) api.version_control(engine, repo, repo.latest) else: migration.db_sync(engine, path) def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): 
engine.execute(table.delete()) engine.dispose() @staticmethod def _create_meter(conn, name, type, unit): # TODO(gordc): implement lru_cache to improve performance try: meter = models.Meter.__table__ trans = conn.begin_nested() if conn.dialect.name == 'sqlite': trans = conn.begin() with trans: meter_row = conn.execute( sa.select([meter.c.id]) .where(sa.and_(meter.c.name == name, meter.c.type == type, meter.c.unit == unit))).first() meter_id = meter_row[0] if meter_row else None if meter_id is None: result = conn.execute(meter.insert(), name=name, type=type, unit=unit) meter_id = result.inserted_primary_key[0] except dbexc.DBDuplicateEntry: # retry function to pick up duplicate committed object meter_id = Connection._create_meter(conn, name, type, unit) return meter_id @staticmethod def _create_resource(conn, res_id, user_id, project_id, source_id, rmeta): # TODO(gordc): implement lru_cache to improve performance try: res = models.Resource.__table__ m_hash = jsonutils.dumps(rmeta, sort_keys=True) if six.PY3: m_hash = m_hash.encode('utf-8') m_hash = hashlib.md5(m_hash).hexdigest() trans = conn.begin_nested() if conn.dialect.name == 'sqlite': trans = conn.begin() with trans: res_row = conn.execute( sa.select([res.c.internal_id]) .where(sa.and_(res.c.resource_id == res_id, res.c.user_id == user_id, res.c.project_id == project_id, res.c.source_id == source_id, res.c.metadata_hash == m_hash))).first() internal_id = res_row[0] if res_row else None if internal_id is None: result = conn.execute(res.insert(), resource_id=res_id, user_id=user_id, project_id=project_id, source_id=source_id, resource_metadata=rmeta, metadata_hash=m_hash) internal_id = result.inserted_primary_key[0] if rmeta and isinstance(rmeta, dict): meta_map = {} for key, v in utils.dict_to_keyval(rmeta): try: _model = sql_utils.META_TYPE_MAP[type(v)] if meta_map.get(_model) is None: meta_map[_model] = [] meta_map[_model].append( {'id': internal_id, 'meta_key': key, 'value': v}) except KeyError: LOG.warning(_("Unknown metadata type. Key " "(%s) will not be queryable."), key) for _model in meta_map.keys(): conn.execute(_model.__table__.insert(), meta_map[_model]) except dbexc.DBDuplicateEntry: # retry function to pick up duplicate committed object internal_id = Connection._create_resource( conn, res_id, user_id, project_id, source_id, rmeta) return internal_id @api.wrap_db_retry(retry_interval=cfg.CONF.database.retry_interval, max_retries=cfg.CONF.database.max_retries, retry_on_deadlock=True) def record_metering_data(self, data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ engine = self._engine_facade.get_engine() with engine.begin() as conn: # Record the raw data for the sample. m_id = self._create_meter(conn, data['counter_name'], data['counter_type'], data['counter_unit']) res_id = self._create_resource(conn, data['resource_id'], data['user_id'], data['project_id'], data['source'], data['resource_metadata']) sample = models.Sample.__table__ conn.execute(sample.insert(), meter_id=m_id, resource_id=res_id, timestamp=data['timestamp'], volume=data['counter_volume'], message_signature=data['message_signature'], message_id=data['message_id']) def clear_expired_metering_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. 
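
        For example, ttl=86400 deletes every sample older than
        utcnow() - datetime.timedelta(seconds=86400) and, unless
        sql_expire_samples_only is set, afterwards prunes the meter and
        resource rows that are left without samples.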
""" # Prevent database deadlocks from occurring by # using separate transaction for each delete session = self._engine_facade.get_session() with session.begin(): end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) sample_q = (session.query(models.Sample) .filter(models.Sample.timestamp < end)) rows = sample_q.delete() LOG.info(_LI("%d samples removed from database"), rows) if not cfg.CONF.sql_expire_samples_only: with session.begin(): # remove Meter definitions with no matching samples (session.query(models.Meter) .filter(~models.Meter.samples.any()) .delete(synchronize_session=False)) with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(~models.Resource.samples.any())) # mark resource with no matching samples for delete resource_q.update({models.Resource.metadata_hash: "delete_" + cast(models.Resource.internal_id, sa.String)}, synchronize_session=False) # remove metadata of resources marked for delete for table in [models.MetaText, models.MetaBigInt, models.MetaFloat, models.MetaBool]: with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(models.Resource.metadata_hash .like('delete_%'))) resource_subq = resource_q.subquery() (session.query(table) .filter(table.id.in_(resource_subq)) .delete(synchronize_session=False)) # remove resource marked for delete with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(models.Resource.metadata_hash .like('delete_%'))) resource_q.delete(synchronize_session=False) LOG.info(_LI("Expired residual resource and" " meter definition data")) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of api_models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return s_filter = storage.SampleFilter(user=user, project=project, source=source, start_timestamp=start_timestamp, start_timestamp_op=start_timestamp_op, end_timestamp=end_timestamp, end_timestamp_op=end_timestamp_op, metaquery=metaquery, resource=resource) session = self._engine_facade.get_session() # get list of resource_ids has_timestamp = start_timestamp or end_timestamp # NOTE: When sql_expire_samples_only is enabled, there will be some # resources without any sample, in such case we should use inner # join on sample table to avoid wrong result. 
if cfg.CONF.sql_expire_samples_only or has_timestamp: res_q = session.query(distinct(models.Resource.resource_id)).join( models.Sample, models.Sample.resource_id == models.Resource.internal_id) else: res_q = session.query(distinct(models.Resource.resource_id)) res_q = make_query_from_filter(session, res_q, s_filter, require_meter=False) res_q = res_q.limit(limit) if limit else res_q for res_id in res_q.all(): # get max and min sample timestamp value min_max_q = (session.query(func.max(models.Sample.timestamp) .label('max_timestamp'), func.min(models.Sample.timestamp) .label('min_timestamp')) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id) .filter(models.Resource.resource_id == res_id[0])) min_max_q = make_query_from_filter(session, min_max_q, s_filter, require_meter=False) min_max = min_max_q.first() # get resource details for latest sample res_q = (session.query(models.Resource.resource_id, models.Resource.user_id, models.Resource.project_id, models.Resource.source_id, models.Resource.resource_metadata) .join(models.Sample, models.Sample.resource_id == models.Resource.internal_id) .filter(models.Sample.timestamp == min_max.max_timestamp) .filter(models.Resource.resource_id == res_id[0]) .order_by(models.Sample.id.desc()).limit(1)) res = res_q.first() yield api_models.Resource( resource_id=res.resource_id, project_id=res.project_id, first_sample_timestamp=min_max.min_timestamp, last_sample_timestamp=min_max.max_timestamp, source=res.source_id, user_id=res.user_id, metadata=res.resource_metadata ) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of api_models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional ID of the resource. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return s_filter = storage.SampleFilter(user=user, project=project, source=source, metaquery=metaquery, resource=resource) # NOTE(gordc): get latest sample of each meter/resource. we do not # filter here as we want to filter only on latest record. session = self._engine_facade.get_session() subq = session.query(func.max(models.Sample.id).label('id')).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id) if unique: subq = subq.group_by(models.Sample.meter_id) else: subq = subq.group_by(models.Sample.meter_id, models.Resource.resource_id) if resource: subq = subq.filter(models.Resource.resource_id == resource) subq = subq.subquery() # get meter details for samples. 
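        # e.g. the subquery above selects max(sample.id) per meter (and
        # per resource when unique=False), so the join on subq.c.id below
        # yields exactly one row (the latest sample) per group.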
query_sample = (session.query(models.Sample.meter_id, models.Meter.name, models.Meter.type, models.Meter.unit, models.Resource.resource_id, models.Resource.project_id, models.Resource.source_id, models.Resource.user_id).join( subq, subq.c.id == models.Sample.id) .join(models.Meter, models.Meter.id == models.Sample.meter_id) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id)) query_sample = make_query_from_filter(session, query_sample, s_filter, require_meter=False) query_sample = query_sample.limit(limit) if limit else query_sample if unique: for row in query_sample.all(): yield api_models.Meter( name=row.name, type=row.type, unit=row.unit, resource_id=None, project_id=None, source=None, user_id=None) else: for row in query_sample.all(): yield api_models.Meter( name=row.name, type=row.type, unit=row.unit, resource_id=row.resource_id, project_id=row.project_id, source=row.source_id, user_id=row.user_id) @staticmethod def _retrieve_samples(query): samples = query.all() for s in samples: # Remove the id generated by the database when # the sample was inserted. It is an implementation # detail that should not leak outside of the driver. yield api_models.Sample( source=s.source_id, counter_name=s.counter_name, counter_type=s.counter_type, counter_unit=s.counter_unit, counter_volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, recorded_at=s.recorded_at, resource_metadata=s.resource_metadata, message_id=s.message_id, message_signature=s.message_signature, ) def get_samples(self, sample_filter, limit=None): """Return an iterable of api_models.Samples. :param sample_filter: Filter. :param limit: Maximum number of results to return. """ if limit == 0: return [] session = self._engine_facade.get_session() query = session.query(models.Sample.timestamp, models.Sample.recorded_at, models.Sample.message_id, models.Sample.message_signature, models.Sample.volume.label('counter_volume'), models.Meter.name.label('counter_name'), models.Meter.type.label('counter_type'), models.Meter.unit.label('counter_unit'), models.Resource.source_id, models.Resource.user_id, models.Resource.project_id, models.Resource.resource_metadata, models.Resource.resource_id).join( models.Meter, models.Meter.id == models.Sample.meter_id).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id).order_by( models.Sample.timestamp.desc()) query = make_query_from_filter(session, query, sample_filter, require_meter=False) if limit: query = query.limit(limit) return self._retrieve_samples(query) def query_samples(self, filter_expr=None, orderby=None, limit=None): if limit == 0: return [] session = self._engine_facade.get_session() engine = self._engine_facade.get_engine() query = session.query(models.Sample.timestamp, models.Sample.recorded_at, models.Sample.message_id, models.Sample.message_signature, models.Sample.volume.label('counter_volume'), models.Meter.name.label('counter_name'), models.Meter.type.label('counter_type'), models.Meter.unit.label('counter_unit'), models.Resource.source_id, models.Resource.user_id, models.Resource.project_id, models.Resource.resource_metadata, models.Resource.resource_id).join( models.Meter, models.Meter.id == models.Sample.meter_id).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id) transformer = sql_utils.QueryTransformer(models.FullSample, query, dialect=engine.dialect.name) if filter_expr is not None: transformer.apply_filter(filter_expr) 
transformer.apply_options(orderby, limit) return self._retrieve_samples(transformer.get_query()) @staticmethod def _get_aggregate_functions(aggregate): if not aggregate: return [f for f in STANDARD_AGGREGATES.values()] functions = [] for a in aggregate: if a.func in STANDARD_AGGREGATES: functions.append(STANDARD_AGGREGATES[a.func]) elif a.func in UNPARAMETERIZED_AGGREGATES: functions.append(UNPARAMETERIZED_AGGREGATES[a.func]) elif a.func in PARAMETERIZED_AGGREGATES['compute']: validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func) if not (validate and validate(a.param)): raise storage.StorageBadAggregate('Bad aggregate: %s.%s' % (a.func, a.param)) compute = PARAMETERIZED_AGGREGATES['compute'][a.func] functions.append(compute(a.param)) else: raise ceilometer.NotImplementedError( 'Selectable aggregate function %s' ' is not supported' % a.func) return functions def _make_stats_query(self, sample_filter, groupby, aggregate): select = [ func.min(models.Sample.timestamp).label('tsmin'), func.max(models.Sample.timestamp).label('tsmax'), models.Meter.unit ] select.extend(self._get_aggregate_functions(aggregate)) session = self._engine_facade.get_session() if groupby: group_attributes = [] for g in groupby: if g != 'resource_metadata.instance_type': group_attributes.append(getattr(models.Resource, g)) else: group_attributes.append( getattr(models.MetaText, 'value') .label('resource_metadata.instance_type')) select.extend(group_attributes) query = ( session.query(*select) .join(models.Meter, models.Meter.id == models.Sample.meter_id) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id) .group_by(models.Meter.unit)) if groupby: for g in groupby: if g == 'resource_metadata.instance_type': query = query.join( models.MetaText, models.Resource.internal_id == models.MetaText.id) query = query.filter( models.MetaText.meta_key == 'instance_type') query = query.group_by(*group_attributes) return make_query_from_filter(session, query, sample_filter) @staticmethod def _stats_result_aggregates(result, aggregate): stats_args = {} if isinstance(result.count, six.integer_types): stats_args['count'] = result.count for attr in ['min', 'max', 'sum', 'avg']: if hasattr(result, attr): stats_args[attr] = getattr(result, attr) if aggregate: stats_args['aggregate'] = {} for a in aggregate: key = '%s%s' % (a.func, '/%s' % a.param if a.param else '') stats_args['aggregate'][key] = getattr(result, key) return stats_args @staticmethod def _stats_result_to_model(result, period, period_start, period_end, groupby, aggregate): stats_args = Connection._stats_result_aggregates(result, aggregate) stats_args['unit'] = result.unit duration = (timeutils.delta_seconds(result.tsmin, result.tsmax) if result.tsmin is not None and result.tsmax is not None else None) stats_args['duration'] = duration stats_args['duration_start'] = result.tsmin stats_args['duration_end'] = result.tsmax stats_args['period'] = period stats_args['period_start'] = period_start stats_args['period_end'] = period_end stats_args['groupby'] = (dict( (g, getattr(result, g)) for g in groupby) if groupby else None) return api_models.Statistics(**stats_args) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of api_models.Statistics instances. Items are containing meter statistics described by the query parameters. The filter must have a meter value set. 
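
        For example, with period=3600 one Statistics object is yielded per
        non-empty hour-long window between the filter's start and end
        timestamps, each carrying the requested aggregates
        (count/min/max/avg/sum by default) for that window.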
""" if groupby: for group in groupby: if group not in ['user_id', 'project_id', 'resource_id', 'resource_metadata.instance_type']: raise ceilometer.NotImplementedError('Unable to group by ' 'these fields') if not period: for res in self._make_stats_query(sample_filter, groupby, aggregate): if res.count: yield self._stats_result_to_model(res, 0, res.tsmin, res.tsmax, groupby, aggregate) return if not (sample_filter.start_timestamp and sample_filter.end_timestamp): res = self._make_stats_query(sample_filter, None, aggregate).first() if not res: # NOTE(liusheng):The 'res' may be NoneType, because no # sample has found with sample filter(s). return query = self._make_stats_query(sample_filter, groupby, aggregate) # HACK(jd) This is an awful method to compute stats by period, but # since we're trying to be SQL agnostic we have to write portable # code, so here it is, admire! We're going to do one request to get # stats by period. We would like to use GROUP BY, but there's no # portable way to manipulate timestamp in SQL, so we can't. for period_start, period_end in base.iter_period( sample_filter.start_timestamp or res.tsmin, sample_filter.end_timestamp or res.tsmax, period): q = query.filter(models.Sample.timestamp >= period_start) q = q.filter(models.Sample.timestamp < period_end) for r in q.all(): if r.count: yield self._stats_result_to_model( result=r, period=int(timeutils.delta_seconds(period_start, period_end)), period_start=period_start, period_end=period_end, groupby=groupby, aggregate=aggregate ) ceilometer-6.1.5/ceilometer/storage/mongo/0000775000567000056710000000000013072745164021740 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/mongo/utils.py0000664000567000056710000005657213072744706023472 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB and DB2 backends """ import datetime import time import weakref from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import pymongo import pymongo.errors import six from six.moves.urllib import parse from ceilometer.i18n import _, _LI ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86 LOG = log.getLogger(__name__) EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4} OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'} MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result) FINALIZE_INT_LAMBDA = lambda result, param=None: int(result) CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id', 'user_id', 'project_id', 'source']) def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): """Create the query document to find timestamps within that range. This is done by given two possible datetimes and their operations. 
By default, using $gte for the lower bound and $lt for the upper bound. """ ts_range = {} if start: if start_timestamp_op == 'gt': start_timestamp_op = '$gt' else: start_timestamp_op = '$gte' ts_range[start_timestamp_op] = start if end: if end_timestamp_op == 'le': end_timestamp_op = '$lte' else: end_timestamp_op = '$lt' ts_range[end_timestamp_op] = end return ts_range def make_events_query_from_filter(event_filter): """Return start and stop row for filtering and a query. Query is based on the selected parameter. :param event_filter: storage.EventFilter object. """ query = {} q_list = [] ts_range = make_timestamp_range(event_filter.start_timestamp, event_filter.end_timestamp) if ts_range: q_list.append({'timestamp': ts_range}) if event_filter.event_type: q_list.append({'event_type': event_filter.event_type}) if event_filter.message_id: q_list.append({'_id': event_filter.message_id}) if event_filter.traits_filter: for trait_filter in event_filter.traits_filter: op = trait_filter.pop('op', 'eq') dict_query = {} for k, v in six.iteritems(trait_filter): if v is not None: # All parameters in EventFilter['traits'] are optional, so # we need to check if they are in the query or no. if k == 'key': dict_query.setdefault('trait_name', v) elif k in ['string', 'integer', 'datetime', 'float']: dict_query.setdefault('trait_type', EVENT_TRAIT_TYPES[k]) dict_query.setdefault('trait_value', v if op == 'eq' else {OP_SIGN[op]: v}) dict_query = {'$elemMatch': dict_query} q_list.append({'traits': dict_query}) if event_filter.admin_proj: q_list.append({'$or': [ {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}}, {'traits': { '$elemMatch': {'trait_name': 'project_id', 'trait_value': event_filter.admin_proj}}}]}) if q_list: query = {'$and': q_list} return query def make_query_from_filter(sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param sample_filter: SampleFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. """ q = {} if sample_filter.user: q['user_id'] = sample_filter.user if sample_filter.project: q['project_id'] = sample_filter.project if sample_filter.meter: q['counter_name'] = sample_filter.meter elif require_meter: raise RuntimeError('Missing required meter specifier') ts_range = make_timestamp_range(sample_filter.start_timestamp, sample_filter.end_timestamp, sample_filter.start_timestamp_op, sample_filter.end_timestamp_op) if ts_range: q['timestamp'] = ts_range if sample_filter.resource: q['resource_id'] = sample_filter.resource if sample_filter.source: q['source'] = sample_filter.source if sample_filter.message_id: q['message_id'] = sample_filter.message_id # so the samples call metadata resource_metadata, so we convert # to that. q.update(dict( ('resource_%s' % k, v) for (k, v) in six.iteritems( improve_keys(sample_filter.metaquery, metaquery=True)))) return q def quote_key(key, reverse=False): """Prepare key for storage data in MongoDB. :param key: key that should be quoted :param reverse: boolean, True --- if we need a reverse order of the keys parts :return: iter of quoted part of the key """ r = -1 if reverse else 1 for k in key.split('.')[::r]: if k.startswith('$'): k = parse.quote(k) yield k def improve_keys(data, metaquery=False): """Improves keys in dict if they contained '.' or started with '$'. 
:param data: is a dictionary where keys need to be checked and improved :param metaquery: boolean, if True dots are not escaped from the keys :return: improved dictionary if keys contained dots or started with '$': {'a.b': 'v'} -> {'a': {'b': 'v'}} {'$ab': 'v'} -> {'%24ab': 'v'} """ if not isinstance(data, dict): return data if metaquery: for key in six.iterkeys(data): if '.$' in key: key_list = [] for k in quote_key(key): key_list.append(k) new_key = '.'.join(key_list) data[new_key] = data.pop(key) else: for key, value in data.items(): if isinstance(value, dict): improve_keys(value) if '.' in key: new_dict = {} for k in quote_key(key, reverse=True): new = {} new[k] = new_dict if new_dict else data.pop(key) new_dict = new data.update(new_dict) else: if key.startswith('$'): new_key = parse.quote(key) data[new_key] = data.pop(key) return data def unquote_keys(data): """Restores initial view of 'quoted' keys in dictionary data :param data: is a dictionary :return: data with restored keys if they were 'quoted'. """ if isinstance(data, dict): for key, value in data.items(): if isinstance(value, dict): unquote_keys(value) if key.startswith('%24'): k = parse.unquote(key) data[k] = data.pop(key) return data class ConnectionPool(object): def __init__(self): self._pool = {} def connect(self, url): connection_options = pymongo.uri_parser.parse_uri(url) del connection_options['database'] del connection_options['username'] del connection_options['password'] del connection_options['collection'] pool_key = tuple(connection_options) if pool_key in self._pool: client = self._pool.get(pool_key)() if client: return client splitted_url = netutils.urlsplit(url) log_data = {'db': splitted_url.scheme, 'nodelist': connection_options['nodelist']} LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data) client = self._mongo_connect(url) self._pool[pool_key] = weakref.ref(client) return client @staticmethod def _mongo_connect(url): try: return MongoProxy(pymongo.MongoClient(url)) except pymongo.errors.ConnectionFailure as e: LOG.warning(_('Unable to connect to the database server: ' '%(errmsg)s.') % {'errmsg': e}) raise class QueryTransformer(object): operators = {"<": "$lt", ">": "$gt", "<=": "$lte", "=<": "$lte", ">=": "$gte", "=>": "$gte", "!=": "$ne", "in": "$in", "=~": "$regex"} complex_operators = {"or": "$or", "and": "$and"} ordering_functions = {"asc": pymongo.ASCENDING, "desc": pymongo.DESCENDING} def transform_orderby(self, orderby): orderby_filter = [] for field in orderby: field_name = list(field.keys())[0] ordering = self.ordering_functions[list(field.values())[0]] orderby_filter.append((field_name, ordering)) return orderby_filter @staticmethod def _move_negation_to_leaf(condition): """Moves every not operator to the leafs. Moving is going by applying the De Morgan rules and annihilating double negations. 
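
        For example, {"not": {"and": [X, Y]}} becomes
        {"or": [{"not": X}, {"not": Y}]}, and a double negation
        {"not": {"not": X}} collapses back to X.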
""" def _apply_de_morgan(tree, negated_subtree, negated_op): if negated_op == "and": new_op = "or" else: new_op = "and" tree[new_op] = [{"not": child} for child in negated_subtree[negated_op]] del tree["not"] def transform(subtree): op = list(subtree.keys())[0] if op in ["and", "or"]: [transform(child) for child in subtree[op]] elif op == "not": negated_tree = subtree[op] negated_op = list(negated_tree.keys())[0] if negated_op == "and": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "or": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "not": # two consecutive not annihilates themselves value = list(negated_tree.values())[0] new_op = list(value.keys())[0] subtree[new_op] = negated_tree[negated_op][new_op] del subtree["not"] transform(subtree) transform(condition) def transform_filter(self, condition): # in Mongo not operator can only be applied to # simple expressions so we have to move every # not operator to the leafs of the expression tree self._move_negation_to_leaf(condition) return self._process_json_tree(condition) def _handle_complex_op(self, complex_op, nodes): element_list = [] for node in nodes: element = self._process_json_tree(node) element_list.append(element) complex_operator = self.complex_operators[complex_op] op = {complex_operator: element_list} return op def _handle_not_op(self, negated_tree): # assumes that not is moved to the leaf already # so we are next to a leaf negated_op = list(negated_tree.keys())[0] negated_field = list(negated_tree[negated_op].keys())[0] value = negated_tree[negated_op][negated_field] if negated_op == "=": return {negated_field: {"$ne": value}} elif negated_op == "!=": return {negated_field: value} else: return {negated_field: {"$not": {self.operators[negated_op]: value}}} def _handle_simple_op(self, simple_op, nodes): field_name = list(nodes.keys())[0] field_value = list(nodes.values())[0] # no operator for equal in Mongo if simple_op == "=": op = {field_name: field_value} return op operator = self.operators[simple_op] op = {field_name: {operator: field_value}} return op def _process_json_tree(self, condition_tree): operator_node = list(condition_tree.keys())[0] nodes = list(condition_tree.values())[0] if operator_node in self.complex_operators: return self._handle_complex_op(operator_node, nodes) if operator_node == "not": negated_tree = condition_tree[operator_node] return self._handle_not_op(negated_tree) return self._handle_simple_op(operator_node, nodes) def safe_mongo_call(call): def closure(*args, **kwargs): # NOTE(idegtiarov) options max_retries and retry_interval have been # registered in storage.__init__ in oslo_db.options.set_defaults # default values for both options are 10. max_retries = cfg.CONF.database.max_retries retry_interval = cfg.CONF.database.retry_interval attempts = 0 while True: try: return call(*args, **kwargs) except pymongo.errors.AutoReconnect as err: if 0 <= max_retries <= attempts: LOG.error(_('Unable to reconnect to the primary mongodb ' 'after %(retries)d retries. Giving up.') % {'retries': max_retries}) raise LOG.warning(_('Unable to reconnect to the primary ' 'mongodb: %(errmsg)s. 
Trying again in ' '%(retry_interval)d seconds.') % {'errmsg': err, 'retry_interval': retry_interval}) attempts += 1 time.sleep(retry_interval) return closure class MongoConn(object): def __init__(self, method): self.method = method @safe_mongo_call def __call__(self, *args, **kwargs): return self.method(*args, **kwargs) MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection) if not typ.startswith('_')]) MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient) if not typ.startswith('_')])) MONGO_METHODS.update(set([typ for typ in dir(pymongo) if not typ.startswith('_')])) class MongoProxy(object): def __init__(self, conn): self.conn = conn def __getitem__(self, item): """Create and return a proxy around the given item of the connection. :param item: name of the item """ return MongoProxy(self.conn[item]) def find(self, *args, **kwargs): # find() must return a CursorProxy object so that the cursor's # next() can catch the AutoReconnect exception and retry. return CursorProxy(self.conn.find(*args, **kwargs)) def create_index(self, keys, name=None, *args, **kwargs): try: self.conn.create_index(keys, name=name, *args, **kwargs) except pymongo.errors.OperationFailure as e: # error codes are plain ints; compare by value, not identity if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS: LOG.info(_LI("Index %s will be recreated.") % name) self._recreate_index(keys, name, *args, **kwargs) @safe_mongo_call def _recreate_index(self, keys, name, *args, **kwargs): self.conn.drop_index(name) self.conn.create_index(keys, name=name, *args, **kwargs) def __getattr__(self, item): """Wrap MongoDB connection. If item is the name of an executable method, for example find or insert, wrap this method in the MongoConn. Otherwise wrap the attribute in a MongoProxy. """ if item in ('name', 'database'): return getattr(self.conn, item) if item in MONGO_METHODS: return MongoConn(getattr(self.conn, item)) return MongoProxy(getattr(self.conn, item)) def __call__(self, *args, **kwargs): return self.conn(*args, **kwargs) class CursorProxy(pymongo.cursor.Cursor): def __init__(self, cursor): self.cursor = cursor def __getitem__(self, item): return self.cursor[item] @safe_mongo_call def next(self): """Wrap Cursor next method. This method will be executed before each Cursor next method call.
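A clone of the wrapped cursor is saved before delegating, so that if pymongo raises AutoReconnect the original read position is restored and safe_mongo_call can retry next() without skipping documents.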
""" try: save_cursor = self.cursor.clone() return self.cursor.next() except pymongo.errors.AutoReconnect: self.cursor = save_cursor raise def __getattr__(self, item): return getattr(self.cursor, item) class AggregationFields(object): def __init__(self, version, group, project, finalize=None, parametrized=False, validate=None): self._finalize = finalize or FINALIZE_FLOAT_LAMBDA self.group = lambda *args: group(*args) if parametrized else group self.project = (lambda *args: project(*args) if parametrized else project) self.version = version self.validate = validate or (lambda name, param: True) def finalize(self, name, data, param=None): field = ("%s" % name) + ("/%s" % param if param else "") return {field: (self._finalize(data.get(field)) if self._finalize else data.get(field))} class Aggregation(object): def __init__(self, name, aggregation_fields): self.name = name aggregation_fields = (aggregation_fields if isinstance(aggregation_fields, list) else [aggregation_fields]) self.aggregation_fields = sorted(aggregation_fields, key=lambda af: getattr(af, "version"), reverse=True) def _get_compatible_aggregation_field(self, version_array): if version_array: version_array = version_array[0:2] else: version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION for aggregation_field in self.aggregation_fields: if version_array >= aggregation_field.version: return aggregation_field def group(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.group(param) def project(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.project(param) def finalize(self, data, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.finalize(self.name, data, param) def validate(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.validate(self.name, param) SUM_AGGREGATION = Aggregation( "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"sum": {"$sum": "$counter_volume"}}, {"sum": "$sum"}, )) AVG_AGGREGATION = Aggregation( "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"avg": {"$avg": "$counter_volume"}}, {"avg": "$avg"}, )) MIN_AGGREGATION = Aggregation( "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"min": {"$min": "$counter_volume"}}, {"min": "$min"}, )) MAX_AGGREGATION = Aggregation( "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"max": {"$max": "$counter_volume"}}, {"max": "$max"}, )) COUNT_AGGREGATION = Aggregation( "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"count": {"$sum": 1}}, {"count": "$count"}, FINALIZE_INT_LAMBDA)) STDDEV_AGGREGATION = Aggregation( "stddev", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"std_square": { "$sum": { "$multiply": ["$counter_volume", "$counter_volume"] }}, "std_count": {"$sum": 1}, "std_sum": {"$sum": "$counter_volume"}}, {"stddev": { "count": "$std_count", "sum": "$std_sum", "square_sum": "$std_square"}}, lambda stddev: ((stddev['square_sum'] * stddev['count'] - stddev["sum"] ** 2) ** 0.5 / stddev['count']))) CARDINALITY_AGGREGATION = Aggregation( "cardinality", # $cond operator available only in MongoDB 2.6+ [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION, lambda field: ({"cardinality/%s" % field: {"$addToSet": "$%s" % field}}), lambda field: { "cardinality/%s" % field: { "$cond": [ {"$eq": ["$cardinality/%s" % field, None]}, 0, {"$size": "$cardinality/%s" % field}] }}, 
validate=CARDINALITY_VALIDATION, parametrized=True), AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, lambda field: ({"cardinality/%s" % field: {"$addToSet": "$%s" % field}}), lambda field: ({"cardinality/%s" % field: "$cardinality/%s" % field}), finalize=len, validate=CARDINALITY_VALIDATION, parametrized=True)] ) def to_unix_timestamp(timestamp): if isinstance(timestamp, datetime.datetime): return int(time.mktime(timestamp.timetuple())) return timestamp def from_unix_timestamp(timestamp): if (isinstance(timestamp, six.integer_types) or isinstance(timestamp, float)): return datetime.datetime.fromtimestamp(timestamp) return timestamp ceilometer-6.1.5/ceilometer/storage/mongo/__init__.py0000664000567000056710000000000013072744703024035 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/storage/__init__.py0000664000567000056710000002060313072744706022734 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend management """ from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log import retrying import six import six.moves.urllib.parse as urlparse from stevedore import driver from ceilometer import utils LOG = log.getLogger(__name__) OLD_OPTS = [ cfg.StrOpt('database_connection', secret=True, help='DEPRECATED - Database connection string.', ), ] cfg.CONF.register_opts(OLD_OPTS) OPTS = [ cfg.IntOpt('metering_time_to_live', default=-1, help="Number of seconds that samples are kept " "in the database for (<= 0 means forever).", deprecated_opts=[cfg.DeprecatedOpt('time_to_live', 'database')]), cfg.IntOpt('event_time_to_live', default=-1, help=("Number of seconds that events are kept " "in the database for (<= 0 means forever).")), cfg.StrOpt('metering_connection', secret=True, help='The connection string used to connect to the metering ' 'database. (if unset, connection is used)'), cfg.StrOpt('event_connection', secret=True, help='The connection string used to connect to the event ' 'database. (if unset, connection is used)'), cfg.IntOpt('db2nosql_resource_id_maxlen', default=512, help="The max length of resources id in DB2 nosql, " "the value should be larger than len(hostname) * 2 " "as compute node's resource id is _."), ] cfg.CONF.register_opts(OPTS, group='database') CLI_OPTS = [ cfg.BoolOpt('sql-expire-samples-only', default=False, help="Indicates if expirer expires only samples. 
If set true," " expired samples will be deleted, but residual" " resource and meter definition data will remain.", ), ] cfg.CONF.register_cli_opts(CLI_OPTS) db_options.set_defaults(cfg.CONF) class StorageUnknownWriteError(Exception): """Error raised when an unknown error occurs while recording.""" class StorageBadVersion(Exception): """Error raised when the storage backend version is not good enough.""" class StorageBadAggregate(Exception): """Error raised when an aggregate is unacceptable to storage backend.""" code = 400 def get_connection_from_config(conf, purpose='metering'): retries = conf.database.max_retries # Convert retry_interval secs to msecs for retry decorator @retrying.retry(wait_fixed=conf.database.retry_interval * 1000, stop_max_attempt_number=retries if retries >= 0 else None) def _inner(): if conf.database_connection: conf.set_override('connection', conf.database_connection, group='database') namespace = 'ceilometer.%s.storage' % purpose url = (getattr(conf.database, '%s_connection' % purpose) or conf.database.connection) return get_connection(url, namespace) return _inner() def get_connection(url, namespace): """Return an open connection to the database.""" connection_scheme = urlparse.urlparse(url).scheme # SQLAlchemy connection URLs may specify a 'dialect' or # 'dialect+driver'. Handle the case where a driver is specified. engine_name = connection_scheme.split('+')[0] if engine_name == 'db2': import warnings warnings.simplefilter("always") import debtcollector debtcollector.deprecate("The DB2nosql driver is no longer supported", version="Liberty", removal_version="N*-cycle") # NOTE: translation not applied bug #1446983 LOG.debug('looking for %(name)r driver in %(namespace)r', {'name': engine_name, 'namespace': namespace}) mgr = driver.DriverManager(namespace, engine_name) return mgr.driver(url) class SampleFilter(object): """Holds the properties for building a query from a meter/sample filter. :param user: The sample owner. :param project: The sample project. :param start_timestamp: Earliest time point in the request. :param start_timestamp_op: Earliest timestamp operation in the request. :param end_timestamp: Latest time point in the request. :param end_timestamp_op: Latest timestamp operation in the request. :param resource: Optional filter for resource id. :param meter: Optional filter for meter type using the meter name. :param source: Optional source filter. :param message_id: Optional sample_id filter. :param metaquery: Optional filter on the metadata """ def __init__(self, user=None, project=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, resource=None, meter=None, source=None, message_id=None, metaquery=None): self.user = user self.project = project self.start_timestamp = utils.sanitize_timestamp(start_timestamp) self.start_timestamp_op = start_timestamp_op self.end_timestamp = utils.sanitize_timestamp(end_timestamp) self.end_timestamp_op = end_timestamp_op self.resource = resource self.meter = meter self.source = source self.metaquery = metaquery or {} self.message_id = message_id def __repr__(self): return ("<SampleFilter(user: %s," " project: %s," " start_timestamp: %s," " start_timestamp_op: %s," " end_timestamp: %s," " end_timestamp_op: %s," " resource: %s," " meter: %s," " source: %s," " metaquery: %s," " message_id: %s)>" % (self.user, self.project, self.start_timestamp, self.start_timestamp_op, self.end_timestamp, self.end_timestamp_op, self.resource, self.meter, self.source, self.metaquery, self.message_id)) class EventFilter(object): """Properties for building an Event query.
:param start_timestamp: UTC start datetime (mandatory) :param end_timestamp: UTC end datetime (mandatory) :param event_type: the name of the event. None for all. :param message_id: the message_id of the event. None for all. :param admin_proj: the project_id of admin role. None if non-admin user. :param traits_filter: the trait filter dicts, all of which are optional. This parameter is a list of dictionaries that specify trait values: .. code-block:: python {'key': <key>, 'string': <value>, 'integer': <value>, 'datetime': <value>, 'float': <value>, 'op': <eq, lt, le, ne, gt or ge>} """ def __init__(self, start_timestamp=None, end_timestamp=None, event_type=None, message_id=None, traits_filter=None, admin_proj=None): self.start_timestamp = utils.sanitize_timestamp(start_timestamp) self.end_timestamp = utils.sanitize_timestamp(end_timestamp) self.message_id = message_id self.event_type = event_type self.traits_filter = traits_filter or [] self.admin_proj = admin_proj def __repr__(self): return ("<EventFilter(start_timestamp: %s," " end_timestamp: %s," " event_type: %s," " traits: %s)>" % (self.start_timestamp, self.end_timestamp, self.event_type, six.text_type(self.traits_filter))) ceilometer-6.1.5/ceilometer/storage/models.py0000664000567000056710000001455513072744703022466 0ustar jenkinsjenkins00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the storage API. """ from ceilometer.storage import base class Resource(base.Model): """Something for which sample data has been collected.""" def __init__(self, resource_id, project_id, first_sample_timestamp, last_sample_timestamp, source, user_id, metadata): """Create a new resource. :param resource_id: UUID of the resource :param project_id: UUID of project owning the resource :param first_sample_timestamp: first sample timestamp captured :param last_sample_timestamp: last sample timestamp captured :param source: the identifier for the user/project id definition :param user_id: UUID of user owning the resource :param metadata: most current metadata for the resource (a dict) """ base.Model.__init__(self, resource_id=resource_id, first_sample_timestamp=first_sample_timestamp, last_sample_timestamp=last_sample_timestamp, project_id=project_id, source=source, user_id=user_id, metadata=metadata, ) class Meter(base.Model): """Definition of a meter for which sample data has been collected.""" def __init__(self, name, type, unit, resource_id, project_id, source, user_id): """Create a new meter.
:param name: name of the meter :param type: type of the meter (gauge, delta, cumulative) :param unit: unit of the meter :param resource_id: UUID of the resource :param project_id: UUID of project owning the resource :param source: the identifier for the user/project id definition :param user_id: UUID of user owning the resource """ base.Model.__init__(self, name=name, type=type, unit=unit, resource_id=resource_id, project_id=project_id, source=source, user_id=user_id, ) class Sample(base.Model): """One collected data point.""" def __init__(self, source, counter_name, counter_type, counter_unit, counter_volume, user_id, project_id, resource_id, timestamp, resource_metadata, message_id, message_signature, recorded_at, ): """Create a new sample. :param source: the identifier for the user/project id definition :param counter_name: the name of the measurement being taken :param counter_type: the type of the measurement :param counter_unit: the units for the measurement :param counter_volume: the measured value :param user_id: the user that triggered the measurement :param project_id: the project that owns the resource :param resource_id: the thing on which the measurement was taken :param timestamp: the time of the measurement :param resource_metadata: extra details about the resource :param message_id: a message identifier :param recorded_at: sample record timestamp :param message_signature: a hash created from the rest of the message data """ base.Model.__init__(self, source=source, counter_name=counter_name, counter_type=counter_type, counter_unit=counter_unit, counter_volume=counter_volume, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=timestamp, resource_metadata=resource_metadata, message_id=message_id, message_signature=message_signature, recorded_at=recorded_at) class Statistics(base.Model): """Computed statistics based on a set of sample data.""" def __init__(self, unit, period, period_start, period_end, duration, duration_start, duration_end, groupby, **data): """Create a new statistics object. :param unit: The unit type of the data set :param period: The length of the time range covered by these stats :param period_start: The timestamp for the start of the period :param period_end: The timestamp for the end of the period :param duration: The total time for the matching samples :param duration_start: The earliest time for the matching samples :param duration_end: The latest time for the matching samples :param groupby: The fields used to group the samples. :param data: some or all of the following aggregates min: The smallest volume found max: The largest volume found avg: The average of all volumes found sum: The total of all volumes found count: The number of samples found aggregate: name-value pairs for selectable aggregates """ base.Model.__init__(self, unit=unit, period=period, period_start=period_start, period_end=period_end, duration=duration, duration_start=duration_start, duration_end=duration_end, groupby=groupby, **data) ceilometer-6.1.5/ceilometer/hardware/0000775000567000056710000000000013072745164020752 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hardware/discovery.py0000664000567000056710000000704113072744706023336 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer import nova_client LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('url_scheme', default='snmp://', help='URL scheme to use for hardware nodes.'), cfg.StrOpt('readonly_user_name', default='ro_snmp_user', help='SNMPd user name of all nodes running in the cloud.'), cfg.StrOpt('readonly_user_password', default='password', help='SNMPd password of all the nodes running in the cloud.', secret=True), ] cfg.CONF.register_opts(OPTS, group='hardware') class NodesDiscoveryTripleO(plugin_base.DiscoveryBase): def __init__(self): super(NodesDiscoveryTripleO, self).__init__() self.nova_cli = nova_client.Client() self.last_run = None self.instances = {} @staticmethod def _address(instance, field): return instance.addresses['ctlplane'][0].get(field) def discover(self, manager, param=None): """Discover resources to monitor. instance_get_all will return all instances if last_run is None, and will return only the instances changed since the last_run time. """ try: instances = self.nova_cli.instance_get_all(self.last_run) except Exception: # NOTE(zqfan): instance_get_all is wrapped and will log exception # when there is any error. It is no need to raise it again and # print one more time. return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance self.last_run = timeutils.utcnow(True).isoformat() resources = [] for instance in self.instances.values(): try: ip_address = self._address(instance, 'addr') final_address = ( cfg.CONF.hardware.url_scheme + cfg.CONF.hardware.readonly_user_name + ':' + cfg.CONF.hardware.readonly_user_password + '@' + ip_address) resource = { 'resource_id': instance.id, 'resource_url': final_address, 'mac_addr': self._address(instance, 'OS-EXT-IPS-MAC:mac_addr'), 'image_id': instance.image['id'], 'flavor_id': instance.flavor['id'] } resources.append(resource) except KeyError: LOG.error(_("Couldn't obtain IP address of " "instance %s") % instance.id) return resources ceilometer-6.1.5/ceilometer/hardware/inspector/0000775000567000056710000000000013072745164022760 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hardware/inspector/base.py0000664000567000056710000000324313072744706024247 0ustar jenkinsjenkins00000000000000# # Copyright 2014 ZHAW SoE # # Authors: Lucas Graf # Toni Zehnder # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
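# NOTE: illustrative sketch, not part of the original tree. With the
# [hardware] options registered in discovery.py above,
# NodesDiscoveryTripleO.discover() assembles one resource URL per
# overcloud node of the form
#
#     snmp://ro_snmp_user:password@192.0.2.10
#
# where 192.0.2.10 stands in for the node's 'ctlplane' address.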
"""Inspector abstraction for read-only access to hardware components""" import abc import six @six.add_metaclass(abc.ABCMeta) class Inspector(object): @abc.abstractmethod def inspect_generic(self, host, cache, extra_metadata, param): """A generic inspect function. :param host: the target host :param cache: cache passed from the pollster :param extra_metadata: extra dict to be used as metadata :param param: a dict of inspector specific param :return: an iterator of (value, metadata, extra) :return value: the sample value :return metadata: dict to construct sample's metadata :return extra: dict of extra metadata to help constructing sample """ def prepare_params(self, param): """Parse the params to a format which the inspector itself recognizes. :param param: inspector params from meter definition file :return: a dict of param which the inspector recognized """ return {} ceilometer-6.1.5/ceilometer/hardware/inspector/snmp.py0000664000567000056710000003020413072744706024307 0ustar jenkinsjenkins00000000000000# # Copyright 2014 ZHAW SoE # Copyright 2014 Intel Corp # # Authors: Lucas Graf # Toni Zehnder # Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector for collecting data over SNMP""" import copy from pysnmp.entity.rfc3413.oneliner import cmdgen import six from ceilometer.hardware.inspector import base class SNMPException(Exception): pass def parse_snmp_return(ret, is_bulk=False): """Check the return value of snmp operations :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data) returned by pysnmp :param is_bulk: True if the ret value is from GetBulkRequest :return: a tuple of (err, data) err: True if error found, or False if no error found data: a string of error description if error found, or the actual return data of the snmp operation """ err = True (errIndication, errStatus, errIdx, varBinds) = ret if errIndication: data = errIndication elif errStatus: if is_bulk: varBinds = varBinds[-1] data = "%s at %s" % (errStatus.prettyPrint(), errIdx and varBinds[int(errIdx) - 1] or "?") else: err = False data = varBinds return err, data EXACT = 'type_exact' PREFIX = 'type_prefix' class SNMPInspector(base.Inspector): # Default port _port = 161 _CACHE_KEY_OID = "snmp_cached_oid" # NOTE: The following mapping has been moved to the yaml file identified # by the config options hardware.meter_definitions_file. However, we still # keep the description here for code reading purpose. """ The following mapping define how to construct (value, metadata, extra) returned by inspect_generic MAPPING = { 'identifier: { 'matching_type': EXACT or PREFIX, 'metric_oid': (oid, value_converter) 'metadata': { metadata_name1: (oid1, value_converter), metadata_name2: (oid2, value_converter), }, 'post_op': special func to modify the return data, }, } For matching_type of EXACT, each item in the above mapping will return exact one (value, metadata, extra) tuple. 
The value would be returned from SNMP request GetRequest for oid of 'metric_oid', the metadata dict would be constructed based on the returning from SNMP GetRequest for oids of 'metadata'. For matching_type of PREFIX, SNMP request GetBulkRequest would be sent to get values for oids of 'metric_oid' and 'metadata' of each item in the above mapping. And each item might return multiple (value, metadata, extra) tuples, e.g. Suppose we have the following mapping: MAPPING = { 'disk.size.total': { 'matching_type': PREFIX, 'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int) 'metadata': { 'device': ("1.3.6.1.4.1.2021.9.1.3", str), 'path': ("1.3.6.1.4.1.2021.9.1.2", str), }, 'post_op': None, }, and the SNMP have the following oid/value(s): { '1.3.6.1.4.1.2021.9.1.6.1': 19222656, '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2", '1.3.6.1.4.1.2021.9.1.2.1': "/" '1.3.6.1.4.1.2021.9.1.6.2': 808112, '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs", '1.3.6.1.4.1.2021.9.1.2.2': "/run", } So here we'll return 2 instances of (value, metadata, extra): (19222656, {'device': "/dev/sda2", 'path': "/"}, None) (808112, {'device': "tmpfs", 'path': "/run"}, None) The post_op is assumed to be implemented by new metric developer. It could be used to add additional special metadata(e.g. ip address), or it could be used to add information into extra dict to be returned to construct the pollster how to build final sample, e.g. extra.update('project_id': xy, 'user_id': zw) """ def __init__(self): super(SNMPInspector, self).__init__() self._cmdGen = cmdgen.CommandGenerator() def _query_oids(self, host, oids, cache, is_bulk): # send GetRequest or GetBulkRequest to get oids values and # populate the values into cache authData = self._get_auth_strategy(host) transport = cmdgen.UdpTransportTarget((host.hostname, host.port or self._port)) oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) if is_bulk: ret = self._cmdGen.bulkCmd(authData, transport, 0, 100, *oids, lookupValues=True) else: ret = self._cmdGen.getCmd(authData, transport, *oids, lookupValues=True) (error, data) = parse_snmp_return(ret, is_bulk) if error: raise SNMPException("An error occurred, oids %(oid)s, " "host %(host)s, %(err)s" % dict(oid=oids, host=host.hostname, err=data)) # save result into cache if is_bulk: for var_bind_table_row in data: for name, val in var_bind_table_row: oid_cache[str(name)] = val else: for name, val in data: oid_cache[str(name)] = val @staticmethod def find_matching_oids(oid_cache, oid, match_type, find_one=True): matched = [] if match_type == PREFIX: for key in oid_cache.keys(): if key.startswith(oid): matched.append(key) if find_one: break else: if oid in oid_cache: matched.append(oid) return matched @staticmethod def get_oid_value(oid_cache, oid_def, suffix=''): oid, converter = oid_def value = oid_cache[oid + suffix] if converter: value = converter(value) return value @classmethod def construct_metadata(cls, oid_cache, meta_defs, suffix=''): metadata = {} for key, oid_def in six.iteritems(meta_defs): metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix) return metadata @classmethod def _find_missing_oids(cls, meter_def, cache): # find oids have not been queried and cached new_oids = [] oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {}) # check metric_oid if not cls.find_matching_oids(oid_cache, meter_def['metric_oid'][0], meter_def['matching_type']): new_oids.append(meter_def['metric_oid'][0]) for metadata in meter_def['metadata'].values(): if not cls.find_matching_oids(oid_cache, metadata[0], meter_def['matching_type']): 
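# a metadata oid missing from the cache is collected here and # queried together with the metric oid in a single _query_oids() # call by inspect_generic()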
new_oids.append(metadata[0]) return new_oids def inspect_generic(self, host, cache, extra_metadata, param): # the snmp definition for the corresponding meter meter_def = param # collect oids that needs to be queried oids_to_query = self._find_missing_oids(meter_def, cache) # query oids and populate into caches if oids_to_query: self._query_oids(host, oids_to_query, cache, meter_def['matching_type'] == PREFIX) # construct (value, metadata, extra) oid_cache = cache[self._CACHE_KEY_OID] # find all oids which needed to construct final sample values # for matching type of EXACT, only 1 sample would be generated # for matching type of PREFIX, multiple samples could be generated oids_for_sample_values = self.find_matching_oids( oid_cache, meter_def['metric_oid'][0], meter_def['matching_type'], False) input_extra_metadata = extra_metadata for oid in oids_for_sample_values: suffix = oid[len(meter_def['metric_oid'][0]):] value = self.get_oid_value(oid_cache, meter_def['metric_oid'], suffix) # get the metadata for this sample value metadata = self.construct_metadata(oid_cache, meter_def['metadata'], suffix) extra_metadata = copy.deepcopy(input_extra_metadata) or {} # call post_op for special cases if meter_def['post_op']: func = getattr(self, meter_def['post_op'], None) if func: value = func(host, cache, meter_def, value, metadata, extra_metadata, suffix) yield (value, metadata, extra_metadata) def _post_op_memory_avail_to_used(self, host, cache, meter_def, value, metadata, extra, suffix): _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0" if _memory_total_oid not in cache[self._CACHE_KEY_OID]: self._query_oids(host, [_memory_total_oid], cache, False) value = int(cache[self._CACHE_KEY_OID][_memory_total_oid]) - value return value def _post_op_net(self, host, cache, meter_def, value, metadata, extra, suffix): # add ip address into metadata _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2" oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) if not self.find_matching_oids(oid_cache, _interface_ip_oid, PREFIX): # populate the oid into cache self._query_oids(host, [_interface_ip_oid], cache, True) ip_addr = '' for k, v in six.iteritems(oid_cache): if k.startswith(_interface_ip_oid) and v == int(suffix[1:]): ip_addr = k.replace(_interface_ip_oid + ".", "") metadata.update(ip=ip_addr) # update resource_id for each nic interface self._suffix_resource_id(host, metadata, 'name', extra) return value def _post_op_disk(self, host, cache, meter_def, value, metadata, extra, suffix): self._suffix_resource_id(host, metadata, 'device', extra) return value @staticmethod def _suffix_resource_id(host, metadata, key, extra): prefix = metadata.get(key) if prefix: res_id = extra.get('resource_id') or host.hostname res_id = res_id + ".%s" % metadata.get(key) extra.update(resource_id=res_id) @staticmethod def _get_auth_strategy(host): if host.password: auth_strategy = cmdgen.UsmUserData(host.username, authKey=host.password) else: auth_strategy = cmdgen.CommunityData(host.username or 'public') return auth_strategy def prepare_params(self, param): processed = {} processed['matching_type'] = param['matching_type'] processed['metric_oid'] = (param['oid'], eval(param['type'])) processed['post_op'] = param.get('post_op', None) processed['metadata'] = {} for k, v in six.iteritems(param.get('metadata', {})): processed['metadata'][k] = (v['oid'], eval(v['type'])) return processed ceilometer-6.1.5/ceilometer/hardware/inspector/__init__.py0000664000567000056710000000171113072744703025067 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Intel 
Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stevedore import driver def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'): """Get inspector driver and load it. :param parsed_url: urlparse.SplitResult object for the inspector :param namespace: Namespace to use to look for drivers. """ loaded_driver = driver.DriverManager(namespace, parsed_url.scheme) return loaded_driver.driver() ceilometer-6.1.5/ceilometer/hardware/pollsters/0000775000567000056710000000000013072745164023001 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hardware/pollsters/util.py0000664000567000056710000000420313072744706024330 0ustar jenkinsjenkins00000000000000# # Copyright 2013 ZHAW SoE # Copyright 2014 Intel Corp. # # Authors: Lucas Graf # Toni Zehnder # Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from ceilometer import sample def get_metadata_from_host(host_url): return {'resource_url': urlparse.urlunsplit(host_url)} def make_resource_metadata(res_metadata=None, host_url=None): resource_metadata = dict() if res_metadata is not None: metadata = copy.copy(res_metadata) resource_metadata.update(metadata) resource_metadata.update(get_metadata_from_host(host_url)) return resource_metadata def make_sample_from_host(host_url, name, sample_type, unit, volume, project_id=None, user_id=None, resource_id=None, res_metadata=None, extra=None, name_prefix='hardware'): extra = extra or {} resource_metadata = make_resource_metadata(res_metadata, host_url) resource_metadata.update(extra) res_id = resource_id or extra.get('resource_id') or host_url.hostname if name_prefix: name = name_prefix + '.' + name return sample.Sample( name=name, type=sample_type, unit=unit, volume=volume, user_id=user_id or extra.get('user_id'), project_id=project_id or extra.get('project_id'), resource_id=res_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, source='hardware', ) ceilometer-6.1.5/ceilometer/hardware/pollsters/generic.py0000664000567000056710000002127713072744706025001 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import pkg_resources from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import six from ceilometer.agent import plugin_base from ceilometer import declarative from ceilometer.hardware import inspector as insloader from ceilometer.hardware.pollsters import util from ceilometer.i18n import _LE, _LW from ceilometer import sample OPTS = [ cfg.StrOpt('meter_definitions_file', default="snmp.yaml", help="Configuration file for defining hardware snmp meters." ), ] cfg.CONF.register_opts(OPTS, group='hardware') LOG = log.getLogger(__name__) class MeterDefinitionException(Exception): def __init__(self, message, definition_cfg): super(MeterDefinitionException, self).__init__(message) self.message = message self.definition_cfg = definition_cfg def __str__(self): return '%s %s: %s' % (self.__class__.__name__, self.definition_cfg, self.message) class MeterDefinition(object): required_fields = ['name', 'unit', 'type'] def __init__(self, definition_cfg): self.cfg = definition_cfg for fname, fval in self.cfg.items(): if (isinstance(fname, six.string_types) and (fname in self.required_fields or fname.endswith('_inspector'))): setattr(self, fname, fval) else: LOG.warning(_LW("Ignore unrecognized field %s"), fname) for fname in self.required_fields: if not getattr(self, fname, None): raise MeterDefinitionException( _LE("Missing field %s") % fname, self.cfg) if self.type not in sample.TYPES: raise MeterDefinitionException( _LE("Unrecognized type value %s") % self.type, self.cfg) class GenericHardwareDeclarativePollster(plugin_base.PollsterBase): CACHE_KEY = 'hardware.generic' mapping = None def __init__(self): super(GenericHardwareDeclarativePollster, self).__init__() self.inspectors = {} def _update_meter_definition(self, definition): self.meter_definition = definition self.cached_inspector_params = {} @property def default_discovery(self): return 'tripleo_overcloud_nodes' @staticmethod def _parse_resource(res): """Parse resource from discovery. Either URL can be given or dict. Dict has to contain at least keys 'resource_id' and 'resource_url', all the dict keys will be stored as metadata. :param res: URL or dict containing all resource info. :return parsed_url, resource_id, metadata: Returns parsed URL used for SNMP query, unique identifier of the resource and metadata of the resource. 
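For example, a discovery result such as::

    {'resource_id': 'node-1',
     'resource_url': 'snmp://ro_snmp_user:password@192.0.2.10'}

(hypothetical values) yields the split resource_url, 'node-1' as the
resource id, and the whole dict as metadata.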
""" parsed_url, resource_id, metadata = (None, None, None) if isinstance(res, dict): if 'resource_url' not in res or 'resource_id' not in res: LOG.error(_LE('Passed resource dict must contain keys ' 'resource_id and resource_url.')) else: metadata = res parsed_url = netutils.urlsplit(res['resource_url']) resource_id = res['resource_id'] else: metadata = {} parsed_url = netutils.urlsplit(res) resource_id = res return parsed_url, resource_id, metadata def _get_inspector(self, parsed_url): if parsed_url.scheme not in self.inspectors: try: driver = insloader.get_inspector(parsed_url) self.inspectors[parsed_url.scheme] = driver except Exception as err: LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"), dict(name=parsed_url.scheme, err=err)) raise err return self.inspectors[parsed_url.scheme] def get_samples(self, manager, cache, resources=None): """Return an iterable of Sample instances from polling the resources. :param manager: The service manager invoking the plugin :param cache: A dictionary for passing data between plugins :param resources: end point to poll data from """ resources = resources or [] h_cache = cache.setdefault(self.CACHE_KEY, {}) sample_iters = [] # Get the meter identifiers to poll identifier = self.meter_definition.name for resource in resources: parsed_url, res, extra_metadata = self._parse_resource(resource) if parsed_url is None: LOG.error(_LE("Skip invalid resource %s"), resource) continue ins = self._get_inspector(parsed_url) try: # Call hardware inspector to poll for the data i_cache = h_cache.setdefault(res, {}) # Prepare inspector parameters and cache it for performance param_key = parsed_url.scheme + '.' + identifier inspector_param = self.cached_inspector_params.get(param_key) if not inspector_param: param = getattr(self.meter_definition, parsed_url.scheme + '_inspector', {}) inspector_param = ins.prepare_params(param) self.cached_inspector_params[param_key] = inspector_param if identifier not in i_cache: i_cache[identifier] = list(ins.inspect_generic( host=parsed_url, cache=i_cache, extra_metadata=extra_metadata, param=inspector_param)) # Generate samples if i_cache[identifier]: sample_iters.append(self.generate_samples( parsed_url, i_cache[identifier])) except Exception as err: LOG.exception(_LE('inspector call failed for %(ident)s ' 'host %(host)s: %(err)s'), dict(ident=identifier, host=parsed_url.hostname, err=err)) return itertools.chain(*sample_iters) def generate_samples(self, host_url, data): """Generate a list of Sample from the data returned by inspector :param host_url: host url of the endpoint :param data: list of data returned by the corresponding inspector """ samples = [] definition = self.meter_definition for (value, metadata, extra) in data: s = util.make_sample_from_host(host_url, name=definition.name, sample_type=definition.type, unit=definition.unit, volume=value, res_metadata=metadata, extra=extra, name_prefix=None) samples.append(s) return samples @classmethod def build_pollsters(cls): if not cls.mapping: definition_cfg = declarative.load_definitions( {}, cfg.CONF.hardware.meter_definitions_file, pkg_resources.resource_filename(__name__, "data/snmp.yaml")) cls.mapping = load_definition(definition_cfg) pollsters = [] for name in cls.mapping: pollster = cls() pollster._update_meter_definition(cls.mapping[name]) pollsters.append((name, pollster)) return pollsters def load_definition(config_def): mappings = {} for meter_def in config_def.get('metric', []): try: meter = MeterDefinition(meter_def) mappings[meter.name] = meter except 
MeterDefinitionException as me: errmsg = (_LE("Error loading meter definition : %(err)s") % dict(err=me.message)) LOG.error(errmsg) return mappings ceilometer-6.1.5/ceilometer/hardware/pollsters/data/0000775000567000056710000000000013072745164023712 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hardware/pollsters/data/snmp.yaml0000664000567000056710000001176513072744703025563 0ustar jenkinsjenkins00000000000000--- metric: # cpu - name: hardware.cpu.load.1min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.1" type: "lambda x: float(str(x))" - name: hardware.cpu.load.5min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.2" type: "lambda x: float(str(x))" - name: hardware.cpu.load.15min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.3" type: "lambda x: float(str(x))" - name: hardware.cpu.util unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.9.0" type: "int" # disk - name: hardware.disk.size.total unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.6" type: "int" metadata: &disk_metadata path: oid: "1.3.6.1.4.1.2021.9.1.2" type: "str" device: oid: "1.3.6.1.4.1.2021.9.1.3" type: "str" post_op: "_post_op_disk" - name: hardware.disk.size.used unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.8" type: "int" metadata: *disk_metadata post_op: "_post_op_disk" # memory - name: hardware.memory.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.5.0" type: "int" - name: hardware.memory.used unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.6.0" type: "int" post_op: "_post_op_memory_avail_to_used" - name: hardware.memory.swap.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.3.0" type: "int" - name: hardware.memory.swap.avail unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.4.0" type: "int" - name: hardware.memory.buffer unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.14.0" type: "int" - name: hardware.memory.cached unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.15.0" type: "int" # network interface - name: hardware.network.incoming.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.10" type: "int" metadata: &net_metadata name: oid: "1.3.6.1.2.1.2.2.1.2" type: "str" speed: oid: "1.3.6.1.2.1.2.2.1.5" type: "lambda x: int(x) / 8" mac: oid: "1.3.6.1.2.1.2.2.1.6" type: "lambda x: x.prettyPrint().replace('0x', '')" post_op: "_post_op_net" - name: hardware.network.outgoing.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.16" type: "int" metadata: *net_metadata post_op: "_post_op_net" - name: hardware.network.outgoing.errors unit: packet type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.20" type: "int" metadata: *net_metadata post_op: "_post_op_net" #network aggregate - name: hardware.network.ip.outgoing.datagrams unit: datagrams type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.2.1.4.10.0" type: "int" - name: hardware.network.ip.incoming.datagrams unit: datagrams type: cumulative snmp_inspector: 
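# NOTE: every "type" value in this file is a Python expression that # SNMPInspector.prepare_params() passes to eval() to obtain a value # converter, e.g. "int" or "lambda x: float(str(x))"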
matching_type: "type_exact" oid: "1.3.6.1.2.1.4.3.0" type: "int" #system stats - name: hardware.system_stats.cpu.idle unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.11.0" type: "int" - name: hardware.system_stats.io.outgoing.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.57.0" type: "int" - name: hardware.system_stats.io.incoming.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.58.0" type: "int" ceilometer-6.1.5/ceilometer/hardware/pollsters/__init__.py0000664000567000056710000000000013072744703025076 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/hardware/__init__.py0000664000567000056710000000000013072744703023047 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/0000775000567000056710000000000013072745164020646 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/notifications.py0000664000567000056710000002124613072744706024077 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handler for producing network counter messages from Neutron notification events. """ from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('neutron_control_exchange', default='neutron', help="Exchange name for Neutron notifications."), ] cfg.CONF.register_opts(OPTS) class NetworkNotificationBase(plugin_base.NotificationBase): resource_name = None @property def event_types(self): return [ # NOTE(flwang): When the *.create.start notification sending, # there is no resource id assigned by Neutron yet. So we ignore # the *.create.start notification for now and only listen the # *.create.end to make sure the resource id is existed. '%s.create.end' % self.resource_name, '%s.update.*' % self.resource_name, '%s.exists' % self.resource_name, # FIXME(dhellmann): Neutron delete notifications do # not include the same metadata as the other messages, # so we ignore them for now. This isn't ideal, since # it may mean we miss charging for some amount of time, # but it is better than throwing away the existing # metadata for a resource when it is deleted. # '%s.delete.start' % (self.resource_name), ] def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=conf.neutron_control_exchange) for topic in self.get_notification_topics(conf)] def process_notification(self, message): counter_name = getattr(self, 'counter_name', self.resource_name) unit_value = getattr(self, 'unit', self.resource_name) resource = message['payload'].get(self.resource_name) if resource: # NOTE(liusheng): In %s.update.start notifications, the id is in # message['payload'] instead of resource itself. 
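# An abridged update.start message (hypothetical ids) looks like # {'event_type': 'network.update.start', # 'payload': {'id': 'net-uuid', 'network': {...}}} # so the id is copied into the resource dict before it is wrapped # in a list below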
if message['event_type'].endswith('update.start'): resource['id'] = message['payload']['id'] resources = [resource] else: resources = message['payload'].get(self.resource_name + 's', []) resource_message = message.copy() for resource in resources: resource_message['payload'] = resource yield sample.Sample.from_notification( name=counter_name, type=sample.TYPE_GAUGE, unit=unit_value, volume=1, user_id=resource_message['_context_user_id'], project_id=resource_message['_context_tenant_id'], resource_id=resource['id'], message=resource_message) event_type_split = resource_message['event_type'].split('.') if len(event_type_split) > 2: yield sample.Sample.from_notification( name=counter_name + "." + event_type_split[1], type=sample.TYPE_DELTA, unit=unit_value, volume=1, user_id=resource_message['_context_user_id'], project_id=resource_message['_context_tenant_id'], resource_id=resource['id'], message=resource_message) class Network(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron network notifications. Handle network.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'network' class Subnet(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle subnet.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'subnet' class Port(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle port.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'port' class Router(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle router.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'router' class FloatingIP(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle floatingip.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'floatingip' counter_name = 'ip.floating' unit = 'ip' class Pool(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle pool.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'pool' counter_name = 'network.services.lb.pool' class Vip(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle vip.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'vip' counter_name = 'network.services.lb.vip' class Member(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle member.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'member' counter_name = 'network.services.lb.member' class HealthMonitor(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle health_monitor.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'health_monitor' counter_name = 'network.services.lb.health_monitor' class Firewall(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle firewall.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall' counter_name = 'network.services.firewall' class FirewallPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. 
Handle firewall_policy.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall_policy' counter_name = 'network.services.firewall.policy' class FirewallRule(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle firewall_rule.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall_rule' counter_name = 'network.services.firewall.rule' class VPNService(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle vpnservice.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'vpnservice' counter_name = 'network.services.vpn' class IPSecPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle pool.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'ipsecpolicy' counter_name = 'network.services.vpn.ipsecpolicy' class IKEPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle ikepolicy.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'ikepolicy' counter_name = 'network.services.vpn.ikepolicy' class IPSecSiteConnection(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle ipsec_site_connection.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'ipsec_site_connection' counter_name = 'network.services.vpn.connections' ceilometer-6.1.5/ceilometer/network/statistics/0000775000567000056710000000000013072745164023040 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/statistics/table.py0000664000567000056710000000242713072744703024504 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class TablePollster(statistics._Base): meter_name = 'switch.table' meter_type = sample.TYPE_GAUGE meter_unit = 'table' class TablePollsterActiveEntries(statistics._Base): meter_name = 'switch.table.active.entries' meter_type = sample.TYPE_GAUGE meter_unit = 'entry' class TablePollsterLookupPackets(statistics._Base): meter_name = 'switch.table.lookup.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' class TablePollsterMatchedPackets(statistics._Base): meter_name = 'switch.table.matched.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' ceilometer-6.1.5/ceilometer/network/statistics/driver.py0000664000567000056710000000163613072744706024714 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class Driver(object): @abc.abstractmethod def get_sample_data(self, meter_name, parse_url, params, cache): """Return volume, resource_id, resource_metadata, timestamp in tuple. If not implemented for meter_name, returns None """ ceilometer-6.1.5/ceilometer/network/statistics/opendaylight/0000775000567000056710000000000013072745164025527 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/statistics/opendaylight/driver.py0000664000567000056710000004246413072744706027407 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils import six from six import moves from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer.network.statistics import driver from ceilometer.network.statistics.opendaylight import client from ceilometer import utils LOG = log.getLogger(__name__) def _get_properties(properties, prefix='properties'): resource_meta = {} if properties is not None: for k, v in six.iteritems(properties): value = v['value'] key = prefix + '_' + k if 'name' in v: key += '_' + v['name'] resource_meta[key] = value return resource_meta def _get_int_sample(key, statistic, resource_id, resource_meta): if key not in statistic: return None return int(statistic[key]), resource_id, resource_meta class OpenDayLightDriver(driver.Driver): """Driver of network info collector from OpenDaylight. This driver uses resources in "pipeline.yaml". Resource requires below conditions: * resource is url * scheme is "opendaylight" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of request url to OpenDaylight REST API endpoint. (default http) * auth: Auth strategy of http. This parameter can be set basic and digest.(default None) * user: This is username that is used by auth.(default None) * password: This is password that is used by auth.(default None) * container_name: Name of container of OpenDaylight.(default "default") This parameter allows multi vaues. 
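Repeating the query parameter polls statistics for each named container,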
e.g.:: opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default& container_name=egg&auth=basic&user=admin&password=admin&scheme=http In this case, the driver sends requests to the following URLs: http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow http://127.0.0.1:8080/controller/nb/v2/statistics/egg/flow """ @staticmethod def _prepare_cache(endpoint, params, cache): if 'network.statistics.opendaylight' in cache: return cache['network.statistics.opendaylight'] data = {} container_names = params.get('container_name', ['default']) odl_params = {} if 'auth' in params: odl_params['auth'] = params['auth'][0] if 'user' in params: odl_params['user'] = params['user'][0] if 'password' in params: odl_params['password'] = params['password'][0] cs = client.Client(endpoint, odl_params) for container_name in container_names: try: container_data = {} # get flow statistics container_data['flow'] = cs.statistics.get_flow_statistics( container_name) # get port statistics container_data['port'] = cs.statistics.get_port_statistics( container_name) # get table statistics container_data['table'] = cs.statistics.get_table_statistics( container_name) # get topology container_data['topology'] = cs.topology.get_topology( container_name) # get switch information container_data['switch'] = cs.switch_manager.get_nodes( container_name) # get and optimize user links # e.g. # before: # "OF|2@OF|00:00:00:00:00:00:00:02" # after: # { # 'port': { # 'type': 'OF', # 'id': '2'}, # 'node': { # 'type': 'OF', # 'id': '00:00:00:00:00:00:00:02' # } # } user_links_raw = cs.topology.get_user_links(container_name) user_links = [] container_data['user_links'] = user_links for user_link_row in user_links_raw['userLinks']: user_link = {} for k, v in six.iteritems(user_link_row): if (k == "dstNodeConnector" or k == "srcNodeConnector"): port_raw, node_raw = v.split('@') port = {} port['type'], port['id'] = port_raw.split('|') node = {} node['type'], node['id'] = node_raw.split('|') v = {'port': port, 'node': node} user_link[k] = v user_links.append(user_link) # get link status to hosts container_data['active_hosts'] = ( cs.host_tracker.get_active_hosts(container_name)) container_data['inactive_hosts'] = ( cs.host_tracker.get_inactive_hosts(container_name)) container_data['timestamp'] = timeutils.utcnow().isoformat() data[container_name] = container_data except Exception: LOG.exception(_('Request failed to connect to OpenDaylight' ' with NorthBound REST API')) cache['network.statistics.opendaylight'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): extractor = self._get_extractor(meter_name) if extractor is None: # Getting this meter is not implemented in this driver, or the # OpenDaylight REST API has no API for this meter. return None iter = self._get_iter(meter_name) if iter is None: # Getting this meter is not implemented in this driver, or the # OpenDaylight REST API has no API for this meter.
return None parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) data = self._prepare_cache(endpoint, params, cache) samples = [] for name, value in six.iteritems(data): timestamp = value['timestamp'] for sample in iter(extractor, value): if sample is not None: # set controller name and container name # to resource_metadata sample[2]['controller'] = 'OpenDaylight' sample[2]['container'] = name samples.append(sample + (timestamp, )) return samples def _get_iter(self, meter_name): if meter_name == 'switch': return self._iter_switch elif meter_name.startswith('switch.flow'): return self._iter_flow elif meter_name.startswith('switch.table'): return self._iter_table elif meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _iter_switch(extractor, data): for switch in data['switch']['nodeProperties']: yield extractor(switch, switch['node']['id'], {}) @staticmethod def _switch(statistic, resource_id, resource_meta): resource_meta.update(_get_properties(statistic.get('properties'))) return 1, resource_id, resource_meta @staticmethod def _iter_port(extractor, data): for port_statistic in data['port']['portStatistics']: for statistic in port_statistic['portStatistic']: resource_meta = {'port': statistic['nodeConnector']['id']} yield extractor(statistic, port_statistic['node']['id'], resource_meta, data) @staticmethod def _switch_port(statistic, resource_id, resource_meta, data): my_node_id = resource_id my_port_id = statistic['nodeConnector']['id'] # link status from topology edge_properties = data['topology']['edgeProperties'] for edge_property in edge_properties: edge = edge_property['edge'] if (edge['headNodeConnector']['node']['id'] == my_node_id and edge['headNodeConnector']['id'] == my_port_id): target_node = edge['tailNodeConnector'] elif (edge['tailNodeConnector']['node']['id'] == my_node_id and edge['tailNodeConnector']['id'] == my_port_id): target_node = edge['headNodeConnector'] else: continue resource_meta['topology_node_id'] = target_node['node']['id'] resource_meta['topology_node_port'] = target_node['id'] resource_meta.update(_get_properties( edge_property.get('properties'), prefix='topology')) break # link status from user links for user_link in data['user_links']: if (user_link['dstNodeConnector']['node']['id'] == my_node_id and user_link['dstNodeConnector']['port']['id'] == my_port_id): target_node = user_link['srcNodeConnector'] elif (user_link['srcNodeConnector']['node']['id'] == my_node_id and user_link['srcNodeConnector']['port']['id'] == my_port_id): target_node = user_link['dstNodeConnector'] else: continue resource_meta['user_link_node_id'] = target_node['node']['id'] resource_meta['user_link_node_port'] = target_node['port']['id'] resource_meta['user_link_status'] = user_link['status'] resource_meta['user_link_name'] = user_link['name'] break # link status to hosts for hosts, status in moves.zip( [data['active_hosts'], data['inactive_hosts']], ['active', 'inactive']): for host_config in hosts['hostConfig']: if (host_config['nodeId'] != my_node_id or host_config['nodeConnectorId'] != my_port_id): continue resource_meta['host_status'] = status for key in ['dataLayerAddress', 'vlan', 'staticHost', 'networkAddress']: if key in host_config: resource_meta['host_' + key] = host_config[key] break return 1, resource_id, resource_meta 
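# NOTE: each _switch_port_* extractor below maps a single OpenDaylight port counter onto the (volume, resource_id, resource_meta) triple consumed by get_sample_data(); _get_int_sample() returns None when a counter is absent, and get_sample_data() drops such samples.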
@staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('receivePackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitPackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_frame_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveFrameError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_overrun_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveOverRunError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_crc_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveCrcError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_collision_count(statistic, resource_id, resource_meta, data): return _get_int_sample('collisionCount', statistic, resource_id, resource_meta) @staticmethod def _iter_table(extractor, data): for table_statistic in data['table']['tableStatistics']: for statistic in table_statistic['tableStatistic']: resource_meta = {'table_id': statistic['nodeTable']['id']} yield extractor(statistic, table_statistic['node']['id'], resource_meta) @staticmethod def _switch_table(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def _switch_table_active_entries(statistic, resource_id, resource_meta): return _get_int_sample('activeCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_lookup_packets(statistic, resource_id, resource_meta): return _get_int_sample('lookupCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_matched_packets(statistic, resource_id, resource_meta): return _get_int_sample('matchedCount', statistic, resource_id, resource_meta) @staticmethod def _iter_flow(extractor, data): for flow_statistic in data['flow']['flowStatistics']: for statistic in flow_statistic['flowStatistic']: resource_meta = {'flow_id': statistic['flow']['id'], 'table_id': statistic['tableId']} for key, value in utils.dict_to_keyval(statistic['flow'], 'flow'): resource_meta[key.replace('.', '_')] = value yield extractor(statistic, flow_statistic['node']['id'], resource_meta) @staticmethod def _switch_flow(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def 
_switch_flow_duration_seconds(statistic, resource_id, resource_meta): return _get_int_sample('durationSeconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_duration_nanoseconds(statistic, resource_id, resource_meta): return _get_int_sample('durationNanoseconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_packets(statistic, resource_id, resource_meta): return _get_int_sample('packetCount', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_bytes(statistic, resource_id, resource_meta): return _get_int_sample('byteCount', statistic, resource_id, resource_meta) ceilometer-6.1.5/ceilometer/network/statistics/opendaylight/client.py0000664000567000056710000001443513072744706027367 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import requests from requests import auth import six from ceilometer.i18n import _ CONF = cfg.CONF CONF.import_opt('http_timeout', 'ceilometer.service') LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(object): """Base class of OpenDaylight REST APIs Clients.""" @abc.abstractproperty def base_url(self): """Returns base url for each REST API.""" def __init__(self, client): self.client = client def request(self, path, container_name): return self.client.request(self.base_url + path, container_name) class OpenDaylightRESTAPIFailed(Exception): pass class StatisticsAPIClient(_Base): """OpenDaylight Statistics REST API Client Base URL: {endpoint}/statistics/{containerName} """ base_url = '/statistics/%(container_name)s' def get_port_statistics(self, container_name): """Get port statistics URL: {Base URL}/port """ return self.request('/port', container_name) def get_flow_statistics(self, container_name): """Get flow statistics URL: {Base URL}/flow """ return self.request('/flow', container_name) def get_table_statistics(self, container_name): """Get table statistics URL: {Base URL}/table """ return self.request('/table', container_name) class TopologyAPIClient(_Base): """OpenDaylight Topology REST API Client Base URL: {endpoint}/topology/{containerName} """ base_url = '/topology/%(container_name)s' def get_topology(self, container_name): """Get topology URL: {Base URL} """ return self.request('', container_name) def get_user_links(self, container_name): """Get user links URL: {Base URL}/userLinks """ return self.request('/userLinks', container_name) class SwitchManagerAPIClient(_Base): """OpenDaylight Switch Manager REST API Client Base URL: {endpoint}/switchmanager/{containerName} """ base_url = '/switchmanager/%(container_name)s' def get_nodes(self, container_name): """Get node information URL: {Base URL}/nodes """ return self.request('/nodes', container_name) class HostTrackerAPIClient(_Base): """OpenDaylight Host Tracker REST API Client Base URL: {endpoint}/hosttracker/{containerName} """ base_url = '/hosttracker/%(container_name)s' def 
get_active_hosts(self, container_name): """Get active hosts information URL: {Base URL}/hosts/active """ return self.request('/hosts/active', container_name) def get_inactive_hosts(self, container_name): """Get inactive hosts information URL: {Base URL}/hosts/inactive """ return self.request('/hosts/inactive', container_name) class Client(object): def __init__(self, endpoint, params): self.statistics = StatisticsAPIClient(self) self.topology = TopologyAPIClient(self) self.switch_manager = SwitchManagerAPIClient(self) self.host_tracker = HostTrackerAPIClient(self) self._endpoint = endpoint self._req_params = self._get_req_params(params) @staticmethod def _get_req_params(params): req_params = { 'headers': { 'Accept': 'application/json' }, 'timeout': CONF.http_timeout, } auth_way = params.get('auth') if auth_way in ['basic', 'digest']: user = params.get('user') password = params.get('password') if auth_way == 'basic': auth_class = auth.HTTPBasicAuth else: auth_class = auth.HTTPDigestAuth req_params['auth'] = auth_class(user, password) return req_params def _log_req(self, url): curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)] if 'auth' in self._req_params: auth_class = self._req_params['auth'] if isinstance(auth_class, auth.HTTPBasicAuth): curl_command.append('--basic ') else: curl_command.append('--digest ') curl_command.append('--user "%s":"%s" ' % (auth_class.username, auth_class.password)) for name, value in six.iteritems(self._req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) @staticmethod def _log_res(resp): dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) def _http_request(self, url): if CONF.debug: self._log_req(url) resp = requests.get(url, **self._req_params) if CONF.debug: self._log_res(resp) if resp.status_code // 100 != 2: raise OpenDaylightRESTAPIFailed( _('OpenDaylight API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp.json() def request(self, path, container_name): url = self._endpoint + path % {'container_name': container_name} return self._http_request(url) ceilometer-6.1.5/ceilometer/network/statistics/opendaylight/__init__.py0000664000567000056710000000000013072744703027624 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/statistics/flow.py0000664000567000056710000000263413072744703024364 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from ceilometer.network import statistics from ceilometer import sample class FlowPollster(statistics._Base): meter_name = 'switch.flow' meter_type = sample.TYPE_GAUGE meter_unit = 'flow' class FlowPollsterDurationSeconds(statistics._Base): meter_name = 'switch.flow.duration_seconds' meter_type = sample.TYPE_GAUGE meter_unit = 's' class FlowPollsterDurationNanoseconds(statistics._Base): meter_name = 'switch.flow.duration_nanoseconds' meter_type = sample.TYPE_GAUGE meter_unit = 'ns' class FlowPollsterPackets(statistics._Base): meter_name = 'switch.flow.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class FlowPollsterBytes(statistics._Base): meter_name = 'switch.flow.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' ceilometer-6.1.5/ceilometer/network/statistics/port.py0000664000567000056710000000541413072744703024400 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class PortPollster(statistics._Base): meter_name = 'switch.port' meter_type = sample.TYPE_GAUGE meter_unit = 'port' class PortPollsterReceivePackets(statistics._Base): meter_name = 'switch.port.receive.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitPackets(statistics._Base): meter_name = 'switch.port.transmit.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveBytes(statistics._Base): meter_name = 'switch.port.receive.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterTransmitBytes(statistics._Base): meter_name = 'switch.port.transmit.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterReceiveDrops(statistics._Base): meter_name = 'switch.port.receive.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitDrops(statistics._Base): meter_name = 'switch.port.transmit.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveErrors(statistics._Base): meter_name = 'switch.port.receive.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitErrors(statistics._Base): meter_name = 'switch.port.transmit.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveFrameErrors(statistics._Base): meter_name = 'switch.port.receive.frame_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveOverrunErrors(statistics._Base): meter_name = 'switch.port.receive.overrun_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveCRCErrors(statistics._Base): meter_name = 'switch.port.receive.crc_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterCollisionCount(statistics._Base): meter_name = 'switch.port.collision.count' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' 
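# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: the pollster classes
# above only declare meter metadata (name, type, unit).  The numeric volumes
# come from a statistics driver whose get_sample_data() returns
# (volume, resource_id, resource_metadata, timestamp) tuples.  The class and
# values below are invented solely to show that contract.
from oslo_utils import timeutils


class _ExamplePortDriver(object):
    """Hypothetical driver emitting one switch.port.receive.bytes datum."""

    def get_sample_data(self, meter_name, parse_url, params, cache):
        # Drivers return None for meters they do not support.
        if meter_name != 'switch.port.receive.bytes':
            return None
        return [(1024, 'port-1', {'port': '1'},
                 timeutils.utcnow().isoformat())]
# ---------------------------------------------------------------------------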
ceilometer-6.1.5/ceilometer/network/statistics/switch.py0000664000567000056710000000144513072744703024715 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class SWPollster(statistics._Base): meter_name = 'switch' meter_type = sample.TYPE_GAUGE meter_unit = 'switch' ceilometer-6.1.5/ceilometer/network/statistics/opencontrail/0000775000567000056710000000000013072745164025535 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/statistics/opencontrail/driver.py0000664000567000056710000001557513072744706027410 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from ceilometer.network.statistics import driver from ceilometer.network.statistics.opencontrail import client from ceilometer import neutron_client class OpencontrailDriver(driver.Driver): """Driver for network analytics from Opencontrail. This driver uses resources in "pipeline.yaml". A resource must satisfy the following conditions: * the resource is a URL * the scheme is "opencontrail" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of the request URL for the Opencontrail Analytics endpoint. (default "http") * virtual_network: Specify the virtual network. (default None) * fqdn_uuid: Specify the VM fqdn UUID. (default "*") * resource: The resource on which the counters are retrieved. (default "if_stats_list") * fip_stats_list: Traffic on floating ips * if_stats_list: Traffic on VM interfaces e.g.:: opencontrail://localhost:8081/?resource=fip_stats_list& virtual_network=default-domain:openstack:public """ @staticmethod def _prepare_cache(endpoint, params, cache): if 'network.statistics.opencontrail' in cache: return cache['network.statistics.opencontrail'] data = { 'o_client': client.Client(endpoint), 'n_client': neutron_client.Client() } cache['network.statistics.opencontrail'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) iter = self._get_iter(meter_name) if iter is None: # The extractor for this meter is not implemented, or the API # doesn't have a method to get this meter.
return extractor = self._get_extractor(meter_name) if extractor is None: # The extractor for this meter is not implemented, or the API # doesn't have a method to get this meter. return data = self._prepare_cache(endpoint, params, cache) ports = data['n_client'].port_get_all() ports_map = dict((port['id'], port) for port in ports) resource = params.get('resource', ['if_stats_list'])[0] fqdn_uuid = params.get('fqdn_uuid', ['*'])[0] virtual_network = params.get('virtual_network', [None])[0] timestamp = timeutils.utcnow().isoformat() statistics = data['o_client'].networks.get_vm_statistics(fqdn_uuid) if not statistics: return for value in statistics['value']: for sample in iter(extractor, value, ports_map, resource, virtual_network): if sample is not None: yield sample + (timestamp, ) def _get_iter(self, meter_name): if meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _explode_name(fq_name): m = re.match( "(?P<domain>[^:]+):(?P<project>.+):(?P<port_id>[^:]+)", fq_name) if not m: return return m.group('domain'), m.group('project'), m.group('port_id') @staticmethod def _get_resource_meta(ports_map, stat, resource, network): if resource == 'fip_stats_list': if network and (network != stat['virtual_network']): return name = stat['iface_name'] else: name = stat['name'] domain, project, port_id = OpencontrailDriver._explode_name(name) port = ports_map.get(port_id) tenant_id = None network_id = None device_owner_id = None if port: tenant_id = port['tenant_id'] network_id = port['network_id'] device_owner_id = port['device_id'] resource_meta = {'device_owner_id': device_owner_id, 'network_id': network_id, 'project_id': tenant_id, 'project': project, 'resource': resource, 'domain': domain} return port_id, resource_meta @staticmethod def _iter_port(extractor, value, ports_map, resource, virtual_network=None): stats = value['value']['UveVirtualMachineAgent'].get(resource, []) for stat in stats: if type(stat) is list: for sub_stats, node in zip(*[iter(stat)] * 2): for sub_stat in sub_stats: result = OpencontrailDriver._get_resource_meta( ports_map, sub_stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(sub_stat, port_id, resource_meta) else: result = OpencontrailDriver._get_resource_meta( ports_map, stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(stat, port_id, resource_meta) @staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta): return int(statistic['in_pkts']), resource_id, resource_meta @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta): return int(statistic['out_pkts']), resource_id, resource_meta @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta): return int(statistic['in_bytes']), resource_id, resource_meta @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta): return int(statistic['out_bytes']), resource_id, resource_meta ceilometer-6.1.5/ceilometer/network/statistics/opencontrail/client.py0000664000567000056710000000710013072744706027364 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_log import log import requests import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ CONF = cfg.CONF CONF.import_opt('http_timeout', 'ceilometer.service') LOG = log.getLogger(__name__) class OpencontrailAPIFailed(Exception): pass class AnalyticsAPIBaseClient(object): """Opencontrail Base Statistics REST API Client.""" def __init__(self, endpoint, data): self.endpoint = endpoint self.data = data or {} def request(self, path, fqdn_uuid, data=None): req_data = copy.copy(self.data) if data: req_data.update(data) req_params = self._get_req_params(data=req_data) url = urlparse.urljoin(self.endpoint, path + fqdn_uuid) self._log_req(url, req_params) resp = requests.get(url, **req_params) self._log_res(resp) if resp.status_code != 200: raise OpencontrailAPIFailed( _('Opencontrail API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp def _get_req_params(self, data=None): req_params = { 'headers': { 'Accept': 'application/json' }, 'data': data, 'allow_redirects': False, 'timeout': CONF.http_timeout, } return req_params @staticmethod def _log_req(url, req_params): if not CONF.debug: return curl_command = ['REQ: curl -i -X GET '] params = [] for name, value in six.iteritems(req_params['data']): params.append("%s=%s" % (name, value)) curl_command.append('"%s?%s" ' % (url, '&'.join(params))) for name, value in six.iteritems(req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) @staticmethod def _log_res(resp): if not CONF.debug: return dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) class NetworksAPIClient(AnalyticsAPIBaseClient): """Opencontrail Statistics REST API Client.""" def get_vm_statistics(self, fqdn_uuid, data=None): """Get statistics of a virtual machine. URL: {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid} """ path = '/analytics/uves/virtual-machine/' resp = self.request(path, fqdn_uuid, data) return resp.json() class Client(object): def __init__(self, endpoint, data=None): self.networks = NetworksAPIClient(endpoint, data) ceilometer-6.1.5/ceilometer/network/statistics/opencontrail/__init__.py0000664000567000056710000000000013072744703027632 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/statistics/__init__.py0000664000567000056710000000656113072744706025162 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_utils import netutils import six from six.moves.urllib import parse as urlparse from stevedore import driver as _driver from ceilometer.agent import plugin_base from ceilometer import sample @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): NAMESPACE = 'network.statistics.drivers' drivers = {} @property def default_discovery(self): # this signifies that the pollster gets its resources from # elsewhere, in this case they're manually listed in the # pipeline configuration return None @abc.abstractproperty def meter_name(self): """Return a Meter Name.""" @abc.abstractproperty def meter_type(self): """Return a Meter Type.""" @abc.abstractproperty def meter_unit(self): """Return a Meter Unit.""" @staticmethod def _parse_my_resource(resource): parse_url = netutils.urlsplit(resource) params = urlparse.parse_qs(parse_url.query) parts = urlparse.ParseResult(parse_url.scheme, parse_url.netloc, parse_url.path, None, None, None) return parts, params @staticmethod def get_driver(scheme): if scheme not in _Base.drivers: _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE, scheme).driver() return _Base.drivers[scheme] def get_samples(self, manager, cache, resources): resources = resources or [] for resource in resources: parse_url, params = self._parse_my_resource(resource) ext = self.get_driver(parse_url.scheme) sample_data = ext.get_sample_data(self.meter_name, parse_url, params, cache) for data in sample_data or []: if data is None: continue if not isinstance(data, list): data = [data] for (volume, resource_id, resource_metadata, timestamp) in data: yield sample.Sample( name=self.meter_name, type=self.meter_type, unit=self.meter_unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, timestamp=timestamp, resource_metadata=resource_metadata ) ceilometer-6.1.5/ceilometer/network/services/0000775000567000056710000000000013072745164022471 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/services/discovery.py0000664000567000056710000000734313072744706025062 0ustar jenkinsjenkins00000000000000# # Copyright (c) 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
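# NOTE: each discovery class in this module wraps a single neutron_client listing call; where the listed resources expose a status (or operating_status) field, items whose status is 'error' are filtered out, so pollsters only receive usable resources.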
from ceilometer.agent import plugin_base from ceilometer import neutron_client class _BaseServicesDiscovery(plugin_base.DiscoveryBase): KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' def __init__(self): super(_BaseServicesDiscovery, self).__init__() self.neutron_cli = neutron_client.Client() class LBPoolsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" pools = self.neutron_cli.pool_get_all() return [i for i in pools if i.get('status') != 'error'] class LBVipsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vips = self.neutron_cli.vip_get_all() return [i for i in vips if i.get('status', None) != 'error'] class LBMembersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" members = self.neutron_cli.member_get_all() return [i for i in members if i.get('status', None) != 'error'] class LBListenersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer listener resources to monitor.""" listeners = self.neutron_cli.list_listener() return [i for i in listeners if i.get('operating_status', None) != 'error'] class LBLoadBalancersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer resources to monitor.""" loadbalancers = self.neutron_cli.list_loadbalancer() return [i for i in loadbalancers if i.get('operating_status', None) != 'error'] class LBHealthMonitorsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" probes = self.neutron_cli.health_monitor_get_all() return probes class VPNServicesDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vpnservices = self.neutron_cli.vpn_get_all() return [i for i in vpnservices if i.get('status', None) != 'error'] class IPSecConnectionsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" conns = self.neutron_cli.ipsec_site_connections_get_all() return conns class FirewallDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" fw = self.neutron_cli.firewall_get_all() return [i for i in fw if i.get('status', None) != 'error'] class FirewallPolicyDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" return self.neutron_cli.fw_policy_get_all() class FloatingIPDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover floating IP resources to monitor.""" return self.neutron_cli.fip_get_all() ceilometer-6.1.5/ceilometer/network/services/fwaas.py0000664000567000056710000000577413072744706024162 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
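# NOTE: FirewallPollster below reports the firewall's textual status as the integer sample volume via BaseServicesPollster.get_status_id() (defined in base.py of this package); a result of -1 marks an unrecognized status and the sample is skipped, while FirewallPolicyPollster always reports a volume of 1 per policy.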
from oslo_log import log from oslo_utils import timeutils from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FirewallPollster(base.BaseServicesPollster): """Pollster to capture firewall status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'firewall_policy_id', ] @property def default_discovery(self): return 'fw_services' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall : %s" % fw) status = self.get_status_id(fw['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on fw %(id)s, " "skipping sample") % {'stat': fw['status'], 'id': fw['id']}) continue yield sample.Sample( name='network.services.firewall', type=sample.TYPE_GAUGE, unit='firewall', volume=status, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(fw) ) class FirewallPolicyPollster(base.BaseServicesPollster): """Pollster to capture firewall policy samples.""" FIELDS = ['name', 'description', 'firewall_rules', 'shared', 'audited', ] @property def default_discovery(self): return 'fw_policy' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall Policy: %s" % fw) yield sample.Sample( name='network.services.firewall.policy', type=sample.TYPE_GAUGE, unit='firewall_policy', volume=1, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(fw) ) ceilometer-6.1.5/ceilometer/network/services/lbaas.py0000664000567000056710000003623713072744706024141 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import abc import collections from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import neutron_client from ceilometer import sample LOG = log.getLogger(__name__) LBStatsData = collections.namedtuple( 'LBStats', ['active_connections', 'total_connections', 'bytes_in', 'bytes_out'] ) LOAD_BALANCER_STATUS_V2 = { 'offline': 0, 'online': 1, 'no_monitor': 3, 'error': 4, 'degraded': 5 } class BaseLBPollster(base.BaseServicesPollster): """Base Class for Load Balancer pollster""" def __init__(self): super(BaseLBPollster, self).__init__() self.lb_version = cfg.CONF.service_types.neutron_lbaas_version def get_load_balancer_status_id(self, value): if self.lb_version == 'v1': resource_status = self.get_status_id(value) elif self.lb_version == 'v2': status = value.lower() resource_status = LOAD_BALANCER_STATUS_V2.get(status, -1) return resource_status class LBPoolPollster(BaseLBPollster): """Pollster to capture Load Balancer pool status samples.""" FIELDS = ['admin_state_up', 'description', 'lb_method', 'name', 'protocol', 'provider', 'status', 'status_description', 'subnet_id', 'vip_id' ] @property def default_discovery(self): return 'lb_pools' def get_samples(self, manager, cache, resources): resources = resources or [] for pool in resources: LOG.debug("Load Balancer Pool : %s" % pool) status = self.get_load_balancer_status_id(pool['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on pool " "%(id)s, skipping sample") % {'stat': pool['status'], 'id': pool['id']}) continue yield sample.Sample( name='network.services.lb.pool', type=sample.TYPE_GAUGE, unit='pool', volume=status, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(pool) ) class LBVipPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Vip status samples.""" FIELDS = ['admin_state_up', 'address', 'connection_limit', 'description', 'name', 'pool_id', 'port_id', 'protocol', 'protocol_port', 'status', 'status_description', 'subnet_id', 'session_persistence', ] @property def default_discovery(self): return 'lb_vips' def get_samples(self, manager, cache, resources): resources = resources or [] for vip in resources: LOG.debug("Load Balancer Vip : %s" % vip) status = self.get_status_id(vip['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vip " "%(id)s, skipping sample") % {'stat': vip['status'], 'id': vip['id']}) continue yield sample.Sample( name='network.services.lb.vip', type=sample.TYPE_GAUGE, unit='vip', volume=status, user_id=None, project_id=vip['tenant_id'], resource_id=vip['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(vip) ) class LBMemberPollster(BaseLBPollster): """Pollster to capture Load Balancer Member status samples.""" FIELDS = ['admin_state_up', 'address', 'pool_id', 'protocol_port', 'status', 'status_description', 'weight', ] @property def default_discovery(self): return 'lb_members' def get_samples(self, manager, cache, resources): resources = resources or [] for member in resources: LOG.debug("Load Balancer Member : %s" % member) status = self.get_load_balancer_status_id(member['status']) if status == -1: LOG.warning(_("Unknown status %(stat)s received on member " "%(id)s, skipping sample") % {'stat': 
member['status'], 'id': member['id']}) continue yield sample.Sample( name='network.services.lb.member', type=sample.TYPE_GAUGE, unit='member', volume=status, user_id=None, project_id=member['tenant_id'], resource_id=member['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(member) ) class LBHealthMonitorPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Health probes status samples.""" FIELDS = ['admin_state_up', 'delay', 'max_retries', 'pools', 'timeout', 'type' ] @property def default_discovery(self): return 'lb_health_probes' def get_samples(self, manager, cache, resources): for probe in resources: LOG.debug("Load Balancer Health probe : %s" % probe) yield sample.Sample( name='network.services.lb.health_monitor', type=sample.TYPE_GAUGE, unit='health_monitor', volume=1, user_id=None, project_id=probe['tenant_id'], resource_id=probe['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(probe) ) @six.add_metaclass(abc.ABCMeta) class _LBStatsPollster(base.BaseServicesPollster): """Base Statistics pollster. It is capturing the statistics info and yielding samples for connections and bandwidth. """ def __init__(self): super(_LBStatsPollster, self).__init__() self.client = neutron_client.Client() self.lb_version = cfg.CONF.service_types.neutron_lbaas_version @staticmethod def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): if not resource_metadata: resource_metadata = {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, ) def _populate_stats_cache(self, pool_id, cache): i_cache = cache.setdefault("lbstats", {}) if pool_id not in i_cache: stats = self.client.pool_stats(pool_id)['stats'] i_cache[pool_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[pool_id] def _populate_stats_cache_v2(self, loadbalancer_id, cache): i_cache = cache.setdefault("lbstats", {}) if loadbalancer_id not in i_cache: stats = self.client.get_loadbalancer_stats(loadbalancer_id) i_cache[loadbalancer_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[loadbalancer_id] @property def default_discovery(self): discovery_resource = 'lb_pools' if self.lb_version == 'v2': discovery_resource = 'lb_loadbalancers' return discovery_resource @abc.abstractmethod def _get_sample(pool, c_data): """Return one Sample.""" def get_samples(self, manager, cache, resources): if self.lb_version == 'v1': for pool in resources: try: c_data = self._populate_stats_cache(pool['id'], cache) yield self._get_sample(pool, c_data) except Exception: LOG.exception(_('Ignoring pool %(pool_id)s'), {'pool_id': pool['id']}) elif self.lb_version == 'v2': for loadbalancer in resources: try: c_data = self._populate_stats_cache_v2(loadbalancer['id'], cache) yield self._get_sample(loadbalancer, c_data) except Exception: LOG.exception( _('Ignoring ' 'loadbalancer %(loadbalancer_id)s'), {'loadbalancer_id': loadbalancer['id']}) class LBActiveConnectionsPollster(_LBStatsPollster): """Pollster to capture Active Load Balancer connections.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( 
pool, name='network.services.lb.active.connections', type=sample.TYPE_GAUGE, unit='connection', volume=data.active_connections, ) class LBTotalConnectionsPollster(_LBStatsPollster): """Pollster to capture Total Load Balancer connections.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.total.connections', type=sample.TYPE_CUMULATIVE, unit='connection', volume=data.total_connections, ) class LBBytesInPollster(_LBStatsPollster): """Pollster to capture incoming bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.incoming.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_in, ) class LBBytesOutPollster(_LBStatsPollster): """Pollster to capture outgoing bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.outgoing.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_out, ) def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): resource_metadata = resource_metadata or {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, ) class LBListenerPollster(BaseLBPollster): """Pollster to capture Load Balancer Listener status samples.""" FIELDS = ['admin_state_up', 'connection_limit', 'description', 'name', 'default_pool_id', 'protocol', 'protocol_port', 'operating_status', 'loadbalancers' ] @property def default_discovery(self): return 'lb_listeners' def get_samples(self, manager, cache, resources): resources = resources or [] for listener in resources: LOG.debug("Load Balancer Listener : %s" % listener) status = self.get_load_balancer_status_id( listener['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on listener " "%(id)s, skipping sample") % {'stat': listener['operating_status'], 'id': listener['id']}) continue yield sample.Sample( name='network.services.lb.listener', type=sample.TYPE_GAUGE, unit='listener', volume=status, user_id=None, project_id=listener['tenant_id'], resource_id=listener['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(listener) ) class LBLoadBalancerPollster(BaseLBPollster): """Pollster to capture Load Balancer status samples.""" FIELDS = ['admin_state_up', 'description', 'vip_address', 'listeners', 'name', 'vip_subnet_id', 'operating_status', ] @property def default_discovery(self): return 'lb_loadbalancers' def get_samples(self, manager, cache, resources): resources = resources or [] for loadbalancer in resources: LOG.debug("Load Balancer: %s" % loadbalancer) status = self.get_load_balancer_status_id( loadbalancer['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received " "on Load Balancer " "%(id)s, skipping sample") % {'stat': loadbalancer['operating_status'], 'id': loadbalancer['id']}) continue yield sample.Sample( name='network.services.lb.loadbalancer', type=sample.TYPE_GAUGE, unit='loadbalancer', volume=status, user_id=None, project_id=loadbalancer['tenant_id'], resource_id=loadbalancer['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(loadbalancer) ) ceilometer-6.1.5/ceilometer/network/services/base.py0000664000567000056710000000246313072744706023763 0ustar 
jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.agent import plugin_base # status map for converting metric status to volume int STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, 'down': 3, 'created': 4, 'pending_update': 5, 'pending_delete': 6, 'error': 7, } class BaseServicesPollster(plugin_base.PollsterBase): FIELDS = [] @staticmethod def _iter_cache(cache, meter_name, method): if meter_name not in cache: cache[meter_name] = list(method()) return iter(cache[meter_name]) def extract_metadata(self, metric): return dict((k, metric[k]) for k in self.FIELDS) @staticmethod def get_status_id(value): status = value.lower() return STATUS.get(status, -1) ceilometer-6.1.5/ceilometer/network/services/vpnaas.py0000664000567000056710000000640313072744706024337 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
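# Illustrative note, not part of the upstream module: the pollsters in this
# package turn a resource's textual status into the integer sample volume
# through the STATUS map in ceilometer/network/services/base.py, e.g.:
#
#     from ceilometer.network.services import base
#
#     base.BaseServicesPollster.get_status_id('ACTIVE')          # -> 1
#     base.BaseServicesPollster.get_status_id('PENDING_CREATE')  # -> 2
#     base.BaseServicesPollster.get_status_id('bogus')           # -> -1
#
# Most pollsters treat a -1 result as an unknown status and skip the sample.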
from oslo_log import log from oslo_utils import timeutils from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class VPNServicesPollster(base.BaseServicesPollster): """Pollster to capture VPN status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'subnet_id', 'router_id' ] @property def default_discovery(self): return 'vpn_services' def get_samples(self, manager, cache, resources): resources = resources or [] for vpn in resources: LOG.debug("VPN : %s" % vpn) status = self.get_status_id(vpn['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vpn " "%(id)s, skipping sample") % {'stat': vpn['status'], 'id': vpn['id']}) continue yield sample.Sample( name='network.services.vpn', type=sample.TYPE_GAUGE, unit='vpnservice', volume=status, user_id=None, project_id=vpn['tenant_id'], resource_id=vpn['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(vpn) ) class IPSecConnectionsPollster(base.BaseServicesPollster): """Pollster to capture vpn ipsec connections status samples.""" FIELDS = ['name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'psk', 'initiator', 'ikepolicy_id', 'dpd', 'ipsecpolicy_id', 'vpnservice_id', 'mtu', 'admin_state_up', 'tenant_id' ] @property def default_discovery(self): return 'ipsec_connections' def get_samples(self, manager, cache, resources): resources = resources or [] for conn in resources: LOG.debug("IPSec Connection Info: %s" % conn) yield sample.Sample( name='network.services.vpn.connections', type=sample.TYPE_GAUGE, unit='ipsec_site_connection', volume=1, user_id=None, project_id=conn['tenant_id'], resource_id=conn['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(conn) ) ceilometer-6.1.5/ceilometer/network/services/__init__.py0000664000567000056710000000000013072744703024566 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/__init__.py0000664000567000056710000000000013072744703022743 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/network/floatingip.py0000664000567000056710000000410613072744706023356 0ustar jenkinsjenkins00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # Copyright 2012 eNovance # Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.i18n import _LW from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') class FloatingIPPollster(base.BaseServicesPollster): FIELDS = ['router_id', 'status', 'floating_network_id', 'fixed_ip_address', 'port_id', 'floating_ip_address', ] @property def default_discovery(self): return 'fip_services' def get_samples(self, manager, cache, resources): for fip in resources or []: if fip['status'] is None: LOG.warning(_LW("Invalid status, skipping IP address %s") % fip['floating_ip_address']) continue status = self.get_status_id(fip['status']) yield sample.Sample( name='ip.floating', type=sample.TYPE_GAUGE, unit='ip', volume=status, user_id=fip.get('user_id'), project_id=fip['tenant_id'], resource_id=fip['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(fip) ) ceilometer-6.1.5/ceilometer/opts.py0000664000567000056710000001320313072744706020514 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from keystoneauth1 import loading import ceilometer.agent.manager import ceilometer.api import ceilometer.api.app import ceilometer.cmd.polling import ceilometer.collector import ceilometer.compute.discovery import ceilometer.compute.notifications import ceilometer.compute.util import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.inspector import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.coordination import ceilometer.dispatcher import ceilometer.dispatcher.file import ceilometer.dispatcher.gnocchi import ceilometer.energy.kwapi import ceilometer.event.converter import ceilometer.exchange_control import ceilometer.hardware.discovery import ceilometer.image.glance import ceilometer.ipmi.notifications.ironic import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.network.notifications import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.service import ceilometer.storage import ceilometer.utils def list_opts(): return [ ('DEFAULT', itertools.chain(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS, ceilometer.cmd.polling.CLI_OPTS, ceilometer.compute.notifications.OPTS, ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.OPTS, ceilometer.image.glance.OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.OPTS,
ceilometer-6.1.5/ceilometer/opts.py0000664000567000056710000001320313072744706020514 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools

from keystoneauth1 import loading

import ceilometer.agent.manager
import ceilometer.api
import ceilometer.api.app
import ceilometer.cmd.polling
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.notifications
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.dispatcher.gnocchi
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.exchange_control  # used by the 'exchange_control' entry below
import ceilometer.hardware.discovery
import ceilometer.image.glance
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.keystone_client
import ceilometer.meter.notifications
import ceilometer.middleware
import ceilometer.network.notifications
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.service
import ceilometer.storage
import ceilometer.utils


def list_opts():
    # Each (section, options) pair maps a ceilometer.conf group name to the
    # option definitions it exposes, for consumption by oslo.config's
    # sample-configuration machinery.
    return [
        ('DEFAULT',
         itertools.chain(ceilometer.agent.manager.OPTS,
                         ceilometer.api.app.OPTS,
                         ceilometer.cmd.polling.CLI_OPTS,
                         ceilometer.compute.notifications.OPTS,
                         ceilometer.compute.util.OPTS,
                         ceilometer.compute.virt.inspector.OPTS,
                         ceilometer.compute.virt.libvirt.inspector.OPTS,
                         ceilometer.dispatcher.OPTS,
                         ceilometer.image.glance.OPTS,
                         ceilometer.ipmi.notifications.ironic.OPTS,
                         ceilometer.middleware.OPTS,
                         ceilometer.network.notifications.OPTS,
                         ceilometer.nova_client.OPTS,
                         ceilometer.objectstore.swift.OPTS,
                         ceilometer.pipeline.OPTS,
                         ceilometer.sample.OPTS,
                         ceilometer.service.OPTS,
                         ceilometer.storage.OLD_OPTS,
                         ceilometer.storage.CLI_OPTS,
                         ceilometer.utils.OPTS,)),
        ('api',
         itertools.chain(ceilometer.api.OPTS,
                         ceilometer.api.app.API_OPTS,
                         [ceilometer.service.API_OPT])),
        # deprecated path, new one is 'polling'
        ('central', ceilometer.agent.manager.OPTS),
        ('collector',
         itertools.chain(ceilometer.collector.OPTS,
                         [ceilometer.service.COLL_OPT])),
        ('compute', ceilometer.compute.discovery.OPTS),
        ('coordination', ceilometer.coordination.OPTS),
        ('database', ceilometer.storage.OPTS),
        ('dispatcher_file', ceilometer.dispatcher.file.OPTS),
        ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi.dispatcher_opts),
        ('event', ceilometer.event.converter.OPTS),
        ('exchange_control', ceilometer.exchange_control.EXCHANGE_OPTS),
        ('hardware', ceilometer.hardware.discovery.OPTS),
        ('ipmi',
         itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
                         ceilometer.ipmi.pollsters.OPTS)),
        ('meter', ceilometer.meter.notifications.OPTS),
        ('notification',
         itertools.chain(ceilometer.notification.OPTS,
                         [ceilometer.service.NOTI_OPT])),
        ('polling', ceilometer.agent.manager.OPTS),
        ('publisher', ceilometer.publisher.utils.OPTS),
        ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
        ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
        # NOTE(sileht): the configuration file contains only the options
        # for the password plugin that handles keystone v2 and v3 API
        # with discovery. But other options are possible.
        # Also, the default loaded plugin is password-ceilometer-legacy for
        # backward compatibility
        ('service_credentials', (
            ceilometer.keystone_client.CLI_OPTS +
            loading.get_auth_common_conf_options() +
            loading.get_auth_plugin_conf_options('password'))),
        ('service_types',
         itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
                         ceilometer.image.glance.SERVICE_OPTS,
                         ceilometer.neutron_client.SERVICE_OPTS,
                         ceilometer.nova_client.SERVICE_OPTS,
                         ceilometer.objectstore.rgw.SERVICE_OPTS,
                         ceilometer.objectstore.swift.SERVICE_OPTS,)),
        ('storage', ceilometer.dispatcher.STORAGE_OPTS),
        ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
        ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
    ]
ceilometer-6.1.5/ceilometer/locale/0000775000567000056710000000000013072745164020414 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/zh_TW/0000775000567000056710000000000013072745164021447 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000013072745164023234 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po0000664000567000056710000003553213072744706025735 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Stefano Maffulli , 2013
# Jennifer , 2016.
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-15 07:53+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "找ä¸åˆ° %(entity)s %(id)s" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "åœ¨è¡¨ç¤ºå¼ '%s' 中,算術轉æ›å™¨å¿…須至少使用一種計é‡" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "無法建立表格 %(table_name)s,該表格已經存在。將忽略錯誤" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "在 %(name)s 傳回錯誤 %(error)s 後繼續" #, python-format msgid "Could not connect slave host: %s " msgstr "無法連接附屬主機:%s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "無法連接 XenAPI:%s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "無法å–å¾— %(id)s çš„ CPU 使用率:%(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "無法å–å¾— %(id)s 的記憶體用é‡ï¼š%(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "無法å–å¾— VM %s CPU 使用率" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "無法å–得實例 %s çš„ IP ä½å€" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "未設定分派器目標,將ä¸å…¬ä½ˆä»»ä½•計é‡ã€‚請在ceilometer.conf 檔中設定目標" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "æ­£åœ¨æ¨æ£„通知 %(type)s(UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查閱實例 時,libvirt 中發生錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "å‰–æž HTTP 回應時發生錯誤:%s" msgid "Error stopping pollster." msgstr "åœæ­¢ pollster 時發生錯誤。" msgid "Event" msgstr "事件" msgid "Expression evaluated to a NaN value!" msgstr "è¡¨ç¤ºå¼æ±‚å€¼çµæžœç‚ºéžæ•¸å­—值ï¼" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "無法匯入 %(name)s 的延伸:%(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "無法檢查實例 的資料,網域狀態為 SHUTOFF。" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "無法檢查 %(instance_uuid)s 的記憶體用é‡ï¼Œç„¡æ³•從 libvirt å–得資訊:%(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "無法檢查實例 的記憶體用é‡ï¼Œç„¡æ³•從 libvirt å–得資" "訊。" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "無法載入 %s 的任何通知處ç†ç¨‹å¼" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "ç„¡æ³•å‰–æžæ™‚間戳記值 %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "無法發佈 %d å€‹è³‡æ–™é»žï¼Œæ­£åœ¨æ¨æ£„它們" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "無法發佈 %d 個資料點,正在將它們排入佇列" #, python-format msgid "Failed to record metering data: %s" msgstr "無法記錄計é‡è³‡æ–™ï¼š%s" #, python-format msgid "Filter expression not valid: %s" msgstr "éŽæ¿¾è¡¨ç¤ºå¼ç„¡æ•ˆï¼š%s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "正在忽略實例 %(name)s (%(instance_id)s):%(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "正在忽略實例 %(name)s:%(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "正在忽略負載平衡器 %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "æ­£åœ¨å¿½ç•¥å„²å­˜å€ %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s 直欄中的 YAML 語法無效。" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "時間段 %(period)s 無效:%(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特質 %(trait)s 的特質類型 '%(type)s' 無效" msgid "Limit must be positive" msgstr "é™åˆ¶å€¼å¿…須是正數" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "從儲存體驅動程å¼å‚³å›žäº†å¤šæ–¼ä¸€å€‹ ID 為 %s 的事件" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "在 XenServer 中找到多個 VM %s" msgid "Must specify connection_url, and connection_password to use" msgstr "必須指定 connection_url å’Œ connection_password,æ‰èƒ½ä½¿ç”¨" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "沒有å為 %(plugin)s 的外掛程å¼å¯ä¾› %(name)s 使用" msgid "Node Manager init failed" msgstr "節點管ç†ç¨‹å¼èµ·å§‹è¨­å®šå¤±æ•—" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "æœªç²æŽˆæ¬Šä¾†å­˜å– %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylight API 傳回了 %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API 傳回了 %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "é‹ç®—å­ %(operator)s ä¸å—支æ´ã€‚åªæœ‰ç­‰å¼é‹ç®—å­æ‰å¯ä¾›æ¬„ä½ %(field)s 使用" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "é‹ç®—å­ %(operator)s ä¸å—支æ´ã€‚å—æ”¯æ´çš„é‹ç®—å­ç‚ºï¼š%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "æŽ’åºæ–¹å¼è¡¨ç¤ºå¼ç„¡æ•ˆï¼š%s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "%(name)s çš„ JSONPath è¦æ ¼ '%(jsonpath)s' 中發生剖æžéŒ¯èª¤ï¼š%(err)s" msgid "Period must be positive." 
msgstr "時間段必須是正數。" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後處於%(status)s狀態" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後繼續" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "管線 %(pipeline)s:清除轉æ›å™¨ %(trans)s 時發生錯誤" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "管線 %(pipeline)s:%(smp)s 的轉æ›å™¨ %(trans)s å‚³å›žéŒ¯èª¤å¾ŒçµæŸ" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "已指定外掛程å¼ï¼Œä½†å»æœªå‘ %s æä¾›å¤–掛程å¼å稱" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次ï¼" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "輪詢 %(name)s 已失敗 %(cnt)s 次ï¼" #, python-format msgid "Pollster for %s is disabled!" msgstr "å·²åœç”¨ %s çš„ Pollsterï¼" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "阻止 pollster %(name)s 冿¬¡è¼ªè©¢è³‡æº %(source)sï¼" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "已超出發佈者 local_queue 長度上é™ï¼Œæ­£åœ¨æ¨æ£„ %d 個最舊的樣本" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "ç™¼ä½ˆåŽŸå‰‡ä¸æ˜Ž (%s),強制設為é è¨­å€¼" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "è¦æ±‚無法使用 NorthBound REST API 來連接至 OpenDaylight" #, python-format msgid "Required field %s not specified" msgstr "æœªæŒ‡å®šå¿…è¦æ¬„ä½ %s" msgid "Resource" msgstr "資æº" msgid "Sample" msgstr "樣本" msgid "Samples should be included in request body" msgstr "è¦æ±‚內文中應該包括範例" #, python-format msgid "Skip loading extension for %s" msgstr "è·³éŽè¼‰å…¥ %s 的延伸" #, python-format msgid "String %s is not a valid isotime" msgstr "字串 %s 䏿˜¯æœ‰æ•ˆçš„ ISO 時間" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "用來定義範例與 gnocchi 資æº/度é‡ä¹‹é–“之尿˜ çš„Yaml 檔案" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "䏿”¯æ´è³‡æ–™é¡žåž‹ %(type)s。支æ´çš„資料類型清單為:%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s éœ€è¦æ¬„ä½ã€Œæ¬„ä½ã€" msgid "The path for the file publisher is required" msgstr "éœ€è¦æª”案發佈者的路徑" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP:無法解碼由 %s 傳é€çš„資料" msgid "UDP: Unable to store meter" msgstr "UDP:無法儲存計é‡" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "無法連接至資料庫伺æœå™¨ï¼š%(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "無法將值 %(value)s è½‰æ›æˆé æœŸçš„資料類型 %(type)s。" #, python-format msgid "Unable to discover resources: %s" msgstr "無法探索資æºï¼š%s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "無法å°è¡¨ç¤ºå¼ %(expr)s 進行求值:%(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "無法載入發佈者 %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "無法載入 Hypervisor 檢查程å¼ï¼š%s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." 
msgstr "在 %(retries)d 次é‡è©¦ä¹‹å¾Œä»ç„¡æ³•釿–°é€£æŽ¥è‡³ä¸»è¦ MongoDB。正在放棄。" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "ç„¡æ³•é‡æ–°é€£æŽ¥è‡³ä¸»è¦ MongoDB:%(errmsg)s。請在%(retry_interval)d ç§’ä¹‹å¾Œå†æ¬¡å˜—" "試。" msgid "Unable to send sample over UDP" msgstr "無法é€éŽ UDP 來傳逿¨£æœ¬" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "å°‡ %(value)s 轉æ›ç‚ºé æœŸçš„資料類型%(type)s 時發生éžé æœŸçš„異常狀æ³ã€‚" #, python-format msgid "Unknown discovery extension: %s" msgstr "䏿˜Žçš„æŽ¢ç´¢å»¶ä¼¸ï¼š%s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "䏿˜Žçš„ meta è³‡æ–™é¡žåž‹ã€‚ç´¢å¼•éµ (%s) å°‡ä¸å¯æŸ¥è©¢ã€‚" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在負載平衡器 %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "在防ç«ç‰† %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在接è½å™¨ %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在æˆå“¡ %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "åœ¨å„²å­˜å€ %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "在 VIP %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s ä¸ŠæŽ¥æ”¶åˆ°ä¸æ˜Žç‹€æ…‹ %(stat)s,正在跳éŽç¯„例" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "在 VMware vSphere 中找ä¸åˆ° VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "在 XenServer 中找ä¸åˆ° VM %s" msgid "Wrong sensor type" msgstr "感應器類型錯誤" msgid "XenAPI not installed" msgstr "æœªå®‰è£ XenAPI" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "讀å–定義檔 %(file)s 時發生 YAML 錯誤" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "ç•¶å·²åœç”¨æˆ–無法使用 Aodh 時,無法使用警示 URL" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "無法å–å¾— %(id)s çš„ CPU 時間:%(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "已啟用 Gnocchi 時,直接é¸é …ä¸èƒ½ç‚º true。" #, python-format msgid "dropping out of time order sample: %s" msgstr "正在刪除ä¸åœ¨æ™‚é–“é †åºå…§çš„範例:%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "æ­£åœ¨æ¨æ£„ä¸å«å‰ä¸€ç‰ˆæœ¬çš„æ¨£æœ¬ï¼š%s" msgid "ipmitool output length mismatch" msgstr "ipmitool 輸出長度ä¸ç¬¦" msgid "max_bytes and backup_count should be numbers." 
msgstr "max_bytes åŠ backup_count 應該是數字。" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "訊æ¯ç°½ç« ç„¡æ•ˆï¼Œæ­£åœ¨æ¨æ£„訊æ¯ï¼š%r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "å‰–æž IPMI 感應器資料失敗,未從給定的輸入擷å–任何資料" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "å‰–æž IPMI æ„Ÿæ‡‰å™¨è³‡æ–™å¤±æ•—ï¼Œæ„Ÿæ‡‰å™¨é¡žåž‹ä¸æ˜Ž" msgid "running ipmitool failure" msgstr "執行 ipmitool 失敗" ceilometer-6.1.5/ceilometer/locale/ko_KR/0000775000567000056710000000000013072745164021421 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000013072745164023206 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001200713072744706027605 0ustar jenkinsjenkins00000000000000# SeYeon Lee , 2016. #zanata # Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-25 02:00+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "%(name)s 검사기를 로드할 수 ì—†ìŒ: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "%(id)sì˜ ìƒì£¼ 메모리 ì‚¬ìš©ì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ : %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "디스패처ì—서 %sì„(를) 처리하지 못하여 다시 ëŒ€ê¸°ì—´ì— ë‘¡ë‹ˆë‹¤." msgid "Error connecting to coordination backend." msgstr "ì¡°ì • 백엔드를 연결하는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤." msgid "Error getting group membership info from coordination backend." msgstr "ì¡°ì • 백엔드ì—서 그룹 멤버십 정보를 가져오는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "" "파티션 지정 그룹 %sì„(를) 결합하는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí•˜ì—¬ 다시 시ë„하는 중입니" "다." #, python-format msgid "Error loading meter definition : %(err)s" msgstr "측정 ì •ì˜ ë¡œë“œ 오류 : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "ì´ë²¤íЏ 처리 중 오류가 ë°œìƒí•˜ë¯€ë¡œ ì‚­ì œë¨: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "하트비트를 ì¡°ì • 백엔드ì—서 보내는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤." msgid "Fail to process a notification" msgstr "ì•Œë¦¼ì„ ì²˜ë¦¬í•˜ëŠ” ë° ì‹¤íŒ¨" msgid "Fail to process notification" msgstr "ì•Œë¦¼ì„ ì²˜ë¦¬í•˜ëŠ” ë° ì‹¤íŒ¨" msgid "Failed to connect to Gnocchi." msgstr "Gnocchiì— ì—°ê²°í•˜ì§€ 못했습니다." 
#, python-format msgid "Failed to connect to Kafka service: %s" msgstr "Kafka ì„œë¹„ìŠ¤ì— ì—°ê²°í•˜ëŠ” ë° ì‹¤íŒ¨: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "DBì— ì—°ê²°í•˜ëŠ” ë° ì‹¤íŒ¨, %(purpose)s ìš©ë„를 ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "DBì— ì—°ê²°í•˜ëŠ” ë° ì‹¤íŒ¨, %(purpose)s ìš©ë„를 ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "%s 오류로 ì¸í•´ ìžì›ì„ 로드하는 ë° ì‹¤íŒ¨" #, python-format msgid "Failed to record event: %s" msgstr "ì´ë²¤íŠ¸ë¥¼ 기ë¡í•˜ëŠ” ë° ì‹¤íŒ¨: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "측정 ë°ì´í„° ê¸°ë¡ ì‹¤íŒ¨: %s" msgid "Failed to retry to send sample data with max_retry times" msgstr "샘플 ë°ì´í„°ë¥¼ max_retry íšŸìˆ˜ë§Œí¼ ë³´ë‚´ëŠ” ë° ì‹¤íŒ¨" msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "그룹 ID: %{group_id}s, 멤버: %{members}s, 사용ìž: %{me}s: 현재 ì—ì´ì „트가 ê·¸" "ë£¹ì˜ ì¼ë¶€ê°€ 아니므로 ìž‘ì—…ì„ ìˆ˜í–‰í•  수 ì—†ìŒ" #, python-format msgid "Invalid type %s specified" msgstr "올바르지 ì•Šì€ ìœ í˜• %sì´(ê°€) 지정ë¨" #, python-format msgid "Missing field %s" msgstr "%s 필드 누ë½" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "ì „ë‹¬ëœ ìžì› dictì— í‚¤ resource_id와 resource_urlì´ í¬í•¨ë˜ì–´ì•¼ 합니다." #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "필수 필드 %(field)sì€(는) %(type)sì´ì–´ì•¼ 함" #, python-format msgid "Required field %s not specified" msgstr "필수 필드 %sì´(ê°€) 지정ë˜ì§€ 않ìŒ" #, python-format msgid "Required fields %s not specified" msgstr "필수 필드 %sì´(ê°€) 지정ë˜ì§€ 않ìŒ" #, python-format msgid "Skip invalid resource %s" msgstr "올바르지 ì•Šì€ ìžì› %s 건너뛰기" #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "%(name)s 건너뛰기, keystone 문제: %(exc)s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "ìƒíƒœ 코드: %{code}s. ì´ë²¤íŠ¸ë¥¼ 디스패치하는 ë° ì‹¤íŒ¨: %{event}s" #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "ë³€ê²½ëœ ì´ë²¤íЏ 파ì´í”„ë¼ì¸ì„ 로드할 수 ì—†ìŒ: %s" #, python-format msgid "Unable to load changed pipeline: %s" msgstr "ë³€ê²½ëœ íŒŒì´í”„ë¼ì¸ì„ 로드할 수 ì—†ìŒ: %s" #, python-format msgid "Unrecognized type value %s" msgstr "ì¸ì‹ë˜ì§€ ì•Šì€ ìœ í˜• ê°’ %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "%(ident)s 호스트 %(host)sì˜ ê²€ì‚¬ê¸° í˜¸ì¶œì— ì‹¤íŒ¨: %(err)s" ceilometer-6.1.5/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po0000664000567000056710000001316213072744706030124 0ustar jenkinsjenkins00000000000000# Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 03:34+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "Can't connect to keystone, assuming aodh is disabled and retry later." msgstr "" "Keystoneì— ì—°ê²°í•  수 없습니다 . Aodhê°€ 사용ë˜ì§€ 않는다고 가정하여 ë‚˜ì¤‘ì— ë‹¤" "시 시ë„합니다." 
msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" msgstr "" "Keystoneì— ì—°ê²°í•  수 없습니다. Gnocchiê°€ 사용ë˜ì§€ 않는다고 가정하여 ë‚˜ì¤‘ì— ë‹¤" "시 시ë„합니다." msgid "" "Cannot extract tasks because agent failed to join group properly. Rejoining " "group." msgstr "" "ì—ì´ì „트가 ì ì ˆí•˜ê²Œ ê·¸ë£¹ì„ ê²°í•©í•˜ì§€ 못했으므로 ìž‘ì—…ì„ ì¶”ì¶œí•  수 없습니다. ê·¸" "ë£¹ì„ ë‹¤ì‹œ 결합합니다." #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " "%(exc)s" msgstr "" "%(instance_id)sì˜ %(pollster)s ë°ì´í„°ë¥¼ 검사할 수 없습니다. 치명ì ì´ì§€ ì•Šì€ " "ì´ìœ : %(exc)s" #, python-format msgid "Dropping out of time order sample: %s" msgstr "시간 순서 샘플ì—서 ì‚­ì œ: %s" #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "ì„ í–‰ ìž‘ì—…ì´ ì—†ëŠ” 샘플 ì‚­ì œ: %s" #, python-format msgid "Duplicated values: %s found in CLI options, auto de-duplicated" msgstr "ì¤‘ë³µëœ ê°’: CLI ì˜µì…˜ì— %sì´(ê°€) 있습니다. ìžë™ìœ¼ë¡œ ì¤‘ë³µì´ í•´ì œë©ë‹ˆë‹¤." #, python-format msgid "Failed to load any dispatchers for %s" msgstr "%sì˜ ë””ìŠ¤íŒ¨ì²˜ë¥¼ 로드하는 ë° ì‹¤íŒ¨" #, python-format msgid "" "Failed to parse date from set fields, both fields %(start)s and %(end)s must " "be datetime: %(err)s" msgstr "" "설정 필드ì—서 ë°ì´í„°ë¥¼ 구문 ë¶„ì„하는 ë° ì‹¤íŒ¨, ë‘ í•„ë“œ %(start)s 와 %(end)sì€" "(는) ëª¨ë‘ datetimeìž„: %(err)s" #, python-format msgid "Ignore unrecognized field %s" msgstr "ì¸ì‹ë˜ì§€ 않는 필드 %s 무시" #, python-format msgid "Invalid status, skipping IP address %s" msgstr "올바르지 ì•Šì€ ìƒíƒœ, IP 주소 %s 건너뛰기" msgid "Negative delta detected, dropping value" msgstr "ìŒìˆ˜ì˜ ë¸íƒ€ê°€ 발견ë˜ì–´ ê°’ì„ ì‚­ì œí•¨" #, python-format msgid "No endpoints found for service %s" msgstr "%s ì„œë¹„ìŠ¤ì˜ ì—”ë“œí¬ì¸íŠ¸ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "" "Non-metric meters may be collected. It is highly advisable to disable these " "meters using ceilometer.conf or the pipeline.yaml" msgstr "" "비측정 미터를 수집할 수 없습니다. celometer.conf ë˜ëŠ” pipeline.yamlì„ ì‚¬ìš©í•˜" "ì—¬ ì´ëŸ¬í•œ 미터를 사용하지 않게 설정하는 ê²ƒì´ ì¢‹ìŠµë‹ˆë‹¤." #, python-format msgid "" "Skipping %(name)s, %(service_type)s service is not registered in keystone" msgstr " %(name)s, %(service_type)s 서비스 건너뛰기는 keystoneì— ë“±ë¡ë˜ì§€ 않ìŒ" #, python-format msgid "Skipping duplicate meter definition %s" msgstr "중복 측정 ì •ì˜ %s 건너뛰기" msgid "" "Timedelta plugin is required two timestamp fields to create timedelta value." msgstr "" "Timedelta 플러그ì¸ì—서 timedelta ê°’ì„ ìƒì„±í•˜ë ¤ë©´ ë‘ ê°œì˜ ì‹œê°„ì†Œì¸ í•„ë“œê°€ í•„ìš”" "합니다." msgid "" "ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " "aodh endpoint." msgstr "" "Aodhê°€ ì‚¬ìš©ëœ ìƒíƒœë¡œ ceilometer-apiê°€ 시작ë˜ì—ˆìŠµë‹ˆë‹¤. 알람 URLì´ aodh 엔드í¬" "ì¸íŠ¸ë¡œ 경로가 재지정ë©ë‹ˆë‹¤." msgid "" "ceilometer-api started with gnocchi enabled. The resources/meters/samples " "URLs are disabled." msgstr "" "Gnocchi를 사용한 ìƒíƒœë¡œ ceilometer-apiê°€ 시작ë˜ì—ˆìŠµë‹ˆë‹¤. ìžì›/측정/샘플 URL" "ì„ ì‚¬ìš©í•˜ì§€ 않습니다." #, python-format msgid "event signature invalid, discarding event: %s" msgstr "ì´ë²¤íЏ ì„œëª…ì´ ì˜¬ë°”ë¥´ì§€ 않아 ì´ë²¤íŠ¸ë¥¼ 삭제함: %s" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "올바르지 ì•Šì€ ë©”ì‹œì§€ 서명. 메시지 버리는 중: %r" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " "volume (volume: None), the sample will be dropped" msgstr "" "%(resource_id)s @ %(timestamp)sì˜ ì¸¡ì • ë°ì´í„° %(counter_name)sì— ë³¼ë¥¨" "(volume: None)ì´ ì—†ìœ¼ë¯€ë¡œ ìƒ˜í”Œì´ ì‚­ì œë©ë‹ˆë‹¤." 
#, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " "volume which is not a number (volume: %(counter_volume)s), the sample will " "be dropped" msgstr "" "%(resource_id)s @ %(timestamp)sì˜ ì¸¡ì • ë°ì´í„° %(counter_name)sì— ë²ˆí˜¸" "(volume: %(counter_volume)s)ê°€ 아닌 ë³¼ë¥¨ì´ ìžˆìœ¼ë¯€ë¡œ, ìƒ˜í”Œì´ ì‚­ì œë©ë‹ˆë‹¤." msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug를 사용하ë„ë¡ ì„¤ì •í•  수 없습니다. 작업ìžê°€ > 1ì´ë©´ ê°’ì´ False로 ê²¹" "ì³ì”니다." #, python-format msgid "" "split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\" to your jsonpath instead" msgstr "" "ë¶„í•  플러그ì¸ì€ ë” ì´ìƒ 사용ë˜ì§€ 않ìŒ, 대신 \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\"ì„(를) jsonpathì— ì¶”ê°€" #, python-format msgid "unable to configure oslo_cache: %s" msgstr "oslo_cache를 구성할 수 ì—†ìŒ: %s" ceilometer-6.1.5/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001027313072744706027412 0ustar jenkinsjenkins00000000000000# Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 03:39+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%d events are removed from database" msgstr "ë°ì´í„°ë² ì´ìФì—서 %d ì´ë²¤íŠ¸ê°€ 제거ë¨" #, python-format msgid "%d samples removed from database" msgstr "ë°ì´í„°ë² ì´ìФì—서 %d ìƒ˜í”Œì´ ì œê±°ë¨" msgid "Configuration:" msgstr "구성:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "%(nodelist)sì—서 %(db)sì— ì—°ê²° 중 " msgid "Coordination backend started successfully." msgstr "ì¡°ì • 백엔드가 성공ì ìœ¼ë¡œ 시작ë˜ì—ˆìŠµë‹ˆë‹¤." #, python-format msgid "Definitions: %s" msgstr "ì •ì˜: %s" msgid "Detected change in pipeline configuration." msgstr "파ì´í”„ë¼ì¸ êµ¬ì„±ì˜ ë³€ê²½ì„ ë°œê²¬í–ˆìŠµë‹ˆë‹¤." #, python-format msgid "Dropping event data with TTL %d" msgstr "TTLì´ %dì¸ ì´ë²¤íЏ ë°ì´í„° ì‚­ì œ" #, python-format msgid "Dropping metering data with TTL %d" msgstr "TTLì´ %dì¸ ì¸¡ì • ë°ì´í„° ì‚­ì œ" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "중복 ì´ë²¤íŠ¸ê°€ 발견ë˜ì–´ 해당 ì´ë²¤íŠ¸ë¥¼ 건너뜀: %s" msgid "Expired residual resource and meter definition data" msgstr "잔여 ìžì› ë° ì¸¡ì • ì •ì˜ ë°ì´í„° 만료ë¨" #, python-format msgid "Index %s will be recreate." msgstr "%s ì¸ë±ìŠ¤ê°€ 다시 ìƒì„±ë©ë‹ˆë‹¤." #, python-format msgid "Joined partitioning group %s" msgstr "ê²°í•©ëœ íŒŒí‹°ì…˜ 그룹 %s" #, python-format msgid "Left partitioning group %s" msgstr "ë‚¨ì€ íŒŒí‹°ì…˜ 그룹 %s" #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "한계 ê°’ì´ ì œê³µë˜ì§€ 않ìŒ, ê²°ê³¼ 세트가 %(limit)d(으)로 제한ë©ë‹ˆë‹¤." 
msgid "Nothing to clean, database event time to live is disabled" msgstr "정리할 ì‚¬í•­ì´ ì—†ìŒ, ë°ì´í„°ë² ì´ìФ ì´ë²¤íЏ ì§€ì† ì‹œê°„(TTL)ì´ ì‚¬ìš©ë˜ì§€ 않ìŒ" msgid "Nothing to clean, database metering time to live is disabled" msgstr "정리할 ì‚¬í•­ì´ ì—†ìŒ, ë°ì´í„°ë² ì´ìФ 측정 ì§€ì† ì‹œê°„(TTL)ì´ ì‚¬ìš©ë˜ì§€ 않ìŒ" #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "파ì´í”„ë¼ì¸ %(pipeline)s: %(param)s 매개변수로 변환기 ì¸ìŠ¤í„´ìŠ¤ %(name)s 설정 " #, python-format msgid "Pipeline config: %s" msgstr "파ì´í”„ë¼ì¸ 구성: %s" msgid "Pipeline configuration file has been updated." msgstr "파ì´í”„ë¼ì¸ 구성 파ì¼ì´ ì—…ë°ì´íЏë˜ì—ˆìŠµë‹ˆë‹¤." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "%(src)s 컨í…ìŠ¤íŠ¸ì˜ ì˜ê²¬ì¡°ì‚¬ìž %(poll)s í´ë§" #, python-format msgid "Publishing policy set to %s" msgstr "공개 ì •ì±…ì´ %s(으)로 설정ë¨" msgid "Reconfiguring polling tasks." msgstr "í´ë§ ìž‘ì—…ì„ ìž¬êµ¬ì„±í•©ë‹ˆë‹¤." msgid "Reloading notification agent and listeners." msgstr "알림 ì—ì´ì „트와 리스너를 다시 로드합니다." #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "Pollster %(name)s 건너뛰기, %(p_context)s ìžì›ì—서 ì´ ì£¼ê¸°ë¥¼ 발견함" #, python-format msgid "Starting server in PID %s" msgstr "PID %sì˜ ì„œë²„ 시작" msgid "detected decoupled pipeline config format" msgstr "비결합 파ì´í”„ë¼ì¸ 구성 í˜•ì‹ ë°œê²¬" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "%(resource_id)sì˜ ì¸¡ì • ë°ì´í„° %(counter_name)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "0.0.0.0:%(sport)sì—서 전달 중, http://127.0.0.1:%(vport)sì—서 보기" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "http://%(host)s:%(port)sì—서 전달 중" ceilometer-6.1.5/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po0000664000567000056710000004104113072744706025677 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Seong-ho Cho , 2014 # Seunghyo Chun , 2013 # Seunghyo Chun , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 03:48+0000\n" "Last-Translator: Lucas Palm \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)sì„(를) ì°¾ì„ ìˆ˜ ì—†ìŒ" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "'%s' 표현ì‹ì—서 산술 변환기는 하나 ì´ìƒì˜ 미터를 사용해야 함" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "%(table_name)s í…Œì´ë¸”ì„ ìž‘ì„±í•  수 ì—†ìŒ, ì´ë¯¸ 존재합니다. 
오류 무시" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "%(name)sì—서 오류 후 계ì†: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "슬레ì´ë¸Œ 호스트를 ì—°ê²°í•  수 ì—†ìŒ: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI를 ì—°ê²°í•  수 ì—†ìŒ: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "%(id)sì— ëŒ€í•´ CPU Utilì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "%(id)sì— ëŒ€í•œ 메모리 ì‚¬ìš©ì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "VM %s CPU ì´ìš©ë¥ ì„ 가져올 수 ì—†ìŒ" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "%s ì¸ìŠ¤í„´ìŠ¤ IP 주소를 ì–»ì„ ìˆ˜ ì—†ìŒ" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "디스패처 대ìƒì„ 설정하지 않았으며 미터가 게시ë˜ì§€ 않습니다. ceilometer.conf " "파ì¼ì— 대ìƒì„ 설정하십시오." #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "알림 %(type)s ì‚­ì œ 중(uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "ì¸ìŠ¤í„´ìŠ¤ 검색 중 libvirtì—서 오류 ë°œìƒ: [오류 ì½”" "드 %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "HTTP ì‘답 구문 ë¶„ì„ ì¤‘ 오류 ë°œìƒ: %s" msgid "Error stopping pollster." msgstr "pollster를 중지하는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤. " msgid "Event" msgstr "ì´ë²¤íЏ" msgid "Expression evaluated to a NaN value!" msgstr "표현ì‹ì´ NaN 값으로 í‰ê°€ë˜ì—ˆìŠµë‹ˆë‹¤!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "%(name)s 확장ìžë¥¼ 가져오는 ë° ì‹¤íŒ¨í•¨: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "ì¸ìŠ¤í„´ìŠ¤ ë°ì´í„° 검사 실패, ë„ë©”ì¸ ìƒíƒœê°€ SHUTOFFìž…" "니다." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "%(instance_uuid)s 메모리 사용량 검사 실패, libvirtì—서 정보를 가져올 수 ì—†" "ìŒ: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "ì¸ìŠ¤í„´ìŠ¤ 메모리 사용량 검사 실패, libvirtì—서 ì •ë³´" "를 가져올 수 없습니다." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "%s 알림 핸들러 로드 실패" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Timestamp ê°’ %s 구문 ë¶„ì„ ì‹¤íŒ¨" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d ë°ì´í„°í¬ì¸íЏ 공개 실패. ì´ë¥¼ 삭제하는 중" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d ë°ì´í„°í¬ì¸íЏ 공개 실패. ì´ë¥¼ íì— ëŒ€ê¸°ì‹œí‚´" #, python-format msgid "Failed to record metering data: %s" msgstr "측정 ë°ì´í„° ê¸°ë¡ ì‹¤íŒ¨: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "í•„í„° 표현ì‹ì´ 올바르지 않ìŒ: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "ì¸ìŠ¤í„´ìŠ¤ %(name)s (%(instance_id)s) 무시 중: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "ì¸ìŠ¤í„´ìŠ¤ %(name)s 무시 중: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "로드 밸런서 %(loadbalancer_id)s 무시" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "í’€ %(pool_id)s 무시" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
msgstr "" "다ìŒì—서 ì •ì˜ íŒŒì¼ %(file)sì˜ ì˜¬ë°”ë¥´ì§€ ì•Šì€ YAML 구문: í–‰: %(line)s, ì—´: " "%(column)s" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "올바르지 ì•Šì€ ê¸°ê°„ %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "특성 %(trait)sì— ëŒ€í•œ 올바르지 ì•Šì€ íŠ¹ì„± 유형 '%(type)s'" msgid "Limit must be positive" msgstr "제한 ê°’ì€ ì–‘ìˆ˜ì—¬ì•¼ 합니다." #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "IDê°€ %sì¸ ë‘˜ ì´ìƒì˜ ì´ë²¤íŠ¸ê°€ 스토리지 드ë¼ì´ë²„ì—서 리턴ë¨" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "여러 VM %sì„(를) XenServerì—서 ì°¾ìŒ " msgid "Must specify connection_url, and connection_password to use" msgstr "사용할 connection_url와 connection_password를 지정해야 함 " #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)sì— ëŒ€í•´ %(plugin)s(ì´)ë¼ëŠ” 플러그ì¸ì„ 사용할 수 ì—†ìŒ" msgid "Node Manager init failed" msgstr "노드 ê´€ë¦¬ìž ì´ˆê¸°í™” 실패" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)sì— ëŒ€í•œ 액세스 ê¶Œí•œì´ ë¶€ì—¬ë˜ì§€ 않ìŒ" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylitght APIê°€ %(status)s 리턴: %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail APIê°€ %(status)s 리턴: %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "ì—°ì‚°ìž %(operator)sì´(ê°€) ì§€ì›ë˜ì§€ 않습니다. 필드 %(field)sì—는 등호 ì—°ì‚°ìž" "ë§Œ 사용할 수 있습니다." #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "ì—°ì‚°ìž %(operator)sì´(ê°€) ì§€ì›ë˜ì§€ 않습니다. ì§€ì›ë˜ëŠ” ì—°ì‚°ìžëŠ” %(supported)s" "입니다. " #, python-format msgid "Order-by expression not valid: %s" msgstr "ì •ë ¬ 표현ì‹ì´ 올바르지 않ìŒ: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" " %(name)sì— ëŒ€í•œ JSONPath 스펙 '%(jsonpath)s'ì˜ êµ¬ë¬¸ ë¶„ì„ ì˜¤ë¥˜: %(err)s" msgid "Period must be positive." msgstr "ê¸°ê°„ì€ ì–‘ìˆ˜ì—¬ì•¼ 합니다. " #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "파ì´í”„ë¼ì¸ %(pipeline)s: Publisher %(pub)sì—서 오류 후 %(status)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "파ì´í”„ë¼ì¸ %(pipeline)s: Publisher %(pub)sì—서 오류 후 계ì†" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "파ì´í”„ë¼ì¸ %(pipeline)s: Transformer %(trans)sì„(를) 비우는 중 오류 ë°œìƒ" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "파ì´í”„ë¼ì¸ %(pipeline)s: %(smp)sì˜ transformer %(trans)sì—서 오류 후 종료" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "플러그ì¸ì´ 지정ë˜ì§€ 않았지만, %sì— í”ŒëŸ¬ê·¸ì¸ ì´ë¦„ì´ ì œê³µë˜ì§€ 않ìŒ" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "í´ë§ %(mtr)s 센서가 %(cnt)s번 실패했습니다!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "í´ë§ %(name)sì´(ê°€) %(cnt)s번 실패했습니다!" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s pollsterê°€ 사용 안함으로 설정ë˜ì–´ 있습니다!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Pollster %(name)sì´(ê°€) 소스 %(source)s를 ë” ì´ìƒ í´ë§í•˜ì§€ 않ë„ë¡ í•˜ì‹­ì‹œì˜¤!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "ê³µê°œìž ìµœëŒ€ local_queue 길ì´ê°€ 초과ë¨. %d 가장 ì˜¤ëž˜ëœ ìƒ˜í”Œ ì‚­ì œ 중" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "공개 ì •ì±…ì„ ì•Œ 수 ì—†ìŒ(%s). 기본값으로 ê°•ì œ 설정함" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps APIê°€ %(status)s %(reason)sì„(를) 리턴함" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "ìš”ì²­ì´ NorthBound REST API로 OpenDaylightì— ì—°ê²°í•˜ëŠ” ë° ì‹¤íŒ¨í•¨" #, python-format msgid "Required field %s not specified" msgstr "필수 필드 %sì´(ê°€) 지정ë˜ì§€ 않ìŒ" msgid "Resource" msgstr "리소스" msgid "Sample" msgstr "샘플" msgid "Samples should be included in request body" msgstr "ìƒ˜í”Œì´ ìš”ì²­ ë³¸ë¬¸ì— í¬í•¨ë˜ì–´ì•¼ 함" #, python-format msgid "Skip loading extension for %s" msgstr "%s í™•ìž¥ìž ë¡œë“œ 건너뛰기" #, python-format msgid "String %s is not a valid isotime" msgstr "문ìžì—´ %sì´(ê°€) 올바른 ë“±ì‹œê°„ì´ ì•„ë‹˜" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "샘플과 gnocchi resources/ metric ê°„ ë§µí•‘ì„ ì •ì˜í•˜ëŠ” Yaml 파ì¼" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "ë°ì´í„° 유형 %(type)sì´(ê°€) ì§€ì›ë˜ì§€ 않습니다. ì§€ì›ë˜ëŠ” ë°ì´í„° 유형 목ë¡ì€ " "%(supported)s입니다." #, python-format msgid "The field 'fields' is required for %s" msgstr "%sì— 'fields' 필드 í•„ìš”" msgid "The path for the file publisher is required" msgstr "íŒŒì¼ ê³µê°œìžì˜ 경로가 필요함" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr " UDP: %sì´(ê°€) 보낸 ë°ì´í„°ë¥¼ í•´ë…í•  수 없습니다" msgid "UDP: Unable to store meter" msgstr "UDP: ì¸¡ì •ì„ ì €ìž¥í•  수 없습니다" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "ë°ì´í„°ë² ì´ìФ ì„œë²„ì— ì—°ê²°í•  수 ì—†ìŒ: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "%(value)s ê°’ì„ ì˜ˆìƒ ë°ì´í„° 유형 %(type)s(으)로 변환할 수 없습니다." #, python-format msgid "Unable to discover resources: %s" msgstr "ìžì›ì„ 검색할 수 ì—†ìŒ: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "%(expr)s 표현ì‹ì„ í‰ê°€í•  수 ì—†ìŒ: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Publisher %sì„(를) 로드할 수 ì—†ìŒ" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "하ì´í¼ë°”ì´ì € 검사기를 로드할 수 ì—†ìŒ: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "%(retries)d 회 재시ë„한 ì´í›„ì—는 1ì°¨ mongodbì— ë‹¤ì‹œ ì—°ê²°í•  수 없습니다. í¬ê¸°" "하는 중입니다." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "1ì°¨ mongodbì— ë‹¤ì‹œ ì—°ê²°í•  수 ì—†ìŒ: %(errmsg)s. %(retry_interval)d ì´ˆ í›„ì— ë‹¤" "시 시ë„합니다." msgid "Unable to send sample over UDP" msgstr "UDP를 통해 ìƒ˜í”Œì„ ì „ì†¡í•  수 ì—†ìŒ" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)sì„(를) 예ìƒëœ ë°ì´í„° 유형으로 변환하는 ì¤‘ì— ì˜ˆìƒì¹˜ ì•Šì€ ì˜ˆì™¸ ë°œìƒ " "%(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "알 수 없는 검색 확장ìž: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "알 수 없는 메타ë°ì´í„° 유형입니다. 키(%s)를 조회할 수 없습니다." 
#, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "로드 밸런서 %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. 샘플 건너뛰기" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "fw %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. ìƒ˜í”Œì„ ê±´ë„ˆë›°ëŠ” 중" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "리스너 %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. 샘플 건너뛰기" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "멤버 %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. ìƒ˜í”Œì„ ê±´ë„ˆë›°ëŠ” 중" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "í’€ %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. ìƒ˜í”Œì„ ê±´ë„ˆë›°ëŠ” 중" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "vIP %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. ìƒ˜í”Œì„ ê±´ë„ˆë›°ëŠ” 중" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "VPN %(id)sì—서 알 수 없는 ìƒíƒœ %(stat)sì´(ê°€) 수신ë¨. 샘플 건너뛰기" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %sì„(를) VMware vSphereì—서 ì°¾ì„ ìˆ˜ ì—†ìŒ" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %sì„(를) XenServerì—서 ì°¾ì„ ìˆ˜ ì—†ìŒ " msgid "Wrong sensor type" msgstr "ìž˜ëª»ëœ ì„¼ì„œ 유형" msgid "XenAPI not installed" msgstr "XenAPIê°€ 설치ë˜ì§€ 않ìŒ" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "ì •ì˜ íŒŒì¼ %(file)sì„(를) ì½ëŠ” ì¤‘ì— YAML 오류 ë°œìƒ" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Aodh를 사용하지 않게 설정하거나 사용할 수 없는 경우 경보 URLì„ ì‚¬ìš©í•  수 없습" "니다." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "%(id)sì˜ CPU ì‹œê°„ì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "Gnocchi를 사용할 때 ì§ì ‘ ì˜µì…˜ì€ trueì¼ ìˆ˜ 없습니다." #, python-format msgid "dropping out of time order sample: %s" msgstr "시간 순서 샘플ì—서 벗어남: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "ì„ í–‰ ìž‘ì—…ì´ ì—†ëŠ” 샘플 ì‚­ì œ: %s" msgid "ipmitool output length mismatch" msgstr "Ipmitool 출력 ê¸¸ì´ ë¶ˆì¼ì¹˜" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes ë° backup_count는 숫ìžì—¬ì•¼ 합니다." #, python-format msgid "message signature invalid, discarding message: %r" msgstr "올바르지 ì•Šì€ ë©”ì‹œì§€ 서명. 메시지 버리는 중: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI 센서 ë°ì´í„° 구문 ë¶„ì„ì— ì‹¤íŒ¨í–ˆìŒ, ì œê³µëœ ìž…ë ¥ì—서 ê²€ìƒ‰ëœ ë°ì´í„°ê°€ ì—†ìŒ" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI 센서 ë°ì´í„° 구문 ë¶„ì„ì— ì‹¤íŒ¨í–ˆìŒ, 알 수 없는 센서 유형" msgid "running ipmitool failure" msgstr "Ipmitool 실행 실패" ceilometer-6.1.5/ceilometer/locale/fr/0000775000567000056710000000000013072745164021023 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/fr/LC_MESSAGES/0000775000567000056710000000000013072745164022610 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po0000664000567000056710000004243513072744706025311 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. 
# # Translators: # Corinne Verheyde , 2013 # CHABERT Loic , 2013 # Christophe kryskool , 2013 # Corinne Verheyde , 2013-2014 # EVEILLARD , 2013-2014 # Francesco Vollero , 2015 # Jonathan Dupart , 2014 # CHABERT Loic , 2013 # Maxime COQUEREL , 2014 # Nick Barcet , 2013 # Nick Barcet , 2013 # Andrew Melim , 2014 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # Rémi Le Trocquer , 2014 # EVEILLARD , 2013 # Corinne Verheyde , 2013 # Corinne Verheyde , 2013 # leroy , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-24 08:42+0000\n" "Last-Translator: leroy \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s non trouvé" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Le transformateur arithmétique doit utiliser au moins un compteur dans " "l'expression '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Impossible de créer la table %(table_name)s car elle existe déjà. Erreur " "ignorée" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continuer après l'erreur de %(name)s : %(error)s " #, python-format msgid "Could not connect slave host: %s " msgstr "Impossible de se connecter à l'hôte esclave : %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Impossible de se connecter à XenAPI : %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Impossible d'obtenir l'utilisation UC pour %(id)s : %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Impossible d'obtenir l'utilisation de la mémoire pour %(id)s : %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "Impossible d'obtenir l'utilisation UC de la machine virtuelle %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Impossible d'obtenir l'adresse IP de l'instance %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "La cible du répartiteur n'était pas définie, aucun compteur ne sera publié. " "Définissez la cible dans le fichier ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Suppression de la notification %(type)s (uuid : %(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de la recherche de l'instance : [Code d'erreur %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Erreur lors de l'analyse syntaxique de la réponse HTTP : %s" msgid "Error stopping pollster." msgstr "Erreur lors de l'arrêt du sondeur." msgid "Event" msgstr "Événement" msgid "Expression evaluated to a NaN value!" msgstr "Expression évaluée avec une valeur NaN !" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Échec de l'importation de l'extension pour %(name)s : %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." 
msgstr "" "Échec de l'inspection des données de l'instance . " "Le domaine est à l'état SHUTOFF (INTERRUPTION)." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Échec de l'inspection de l'utilisation de la mémoire de %(instance_uuid)s. " "Impossible d'obtenir des informations de libvirt : %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Échec de l'inspection de l'utilisation de la mémoire de l'instance . Impossible d'obtenir des informations de libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Échec du chargement des gestionnaires de notification pour %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Échec de l'analyse syntaxique de la valeur d'horodatage %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Échec de la publication de %d points de données. Suppression en cours" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "Échec de la publication de %d points de données. Mise en file d'attente" #, python-format msgid "Failed to record metering data: %s" msgstr "Impossible d'enregistrer les données de mesure : %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expression de filtre non valide : %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Instance %(name)s ignorée (%(instance_id)s) : %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Instance %(name)s : %(error)s ignorée" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Équilibreur de charge %(loadbalancer_id)s ignoré" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Pool %(pool_id)s ignoré" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : " "%(line)s, colonne : %(column)s." 
#, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Période %(period)s non valide : %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s" msgid "Limit must be positive" msgstr "La limite doit être positive" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Plusieurs événements avec l'ID %s retournés par le pilote de stockage" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Plusieurs machines virtuelles %s trouvées dans XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "connection_url et connection_password doivent être indiqués pour " "l'utilisation de" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Aucun plug-in nommé %(plugin)s n'est disponible pour %(name)s" msgid "Node Manager init failed" msgstr "Échec de l'initialisation du gestionnaire de nÅ“ud" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Non autorisé à accéder à %(aspect)s %(id)s " #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "L'API OpenDaylight a renvoyé %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "L'API Opencontrail a renvoyé %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Opérateur %(operator)s non pris en charge. Seul l'opérateur d'égalité est " "disponible pour la zone %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "L'opérateur %(operator)s n'est pas pris en charge. Les opérateurs pris en " "charge sont les suivants : %(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Expression Order-by non valide : %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erreur d'analyse syntaxique dans la spécification JSONPath '%(jsonpath)s' " "pour %(name)s : %(err)s" msgid "Period must be positive." msgstr "La période doit être positive." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "Pipeline %(pipeline)s : Statut %(status)s après erreur de l'éditeur %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s : Reprise après une erreur de l'éditeur %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "Pipeline %(pipeline)s : Erreur lors de la purge du transformateur %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s : Sortie après erreur du transformateur %(trans)s pour " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in spécifié, mais aucun nom de plug-in fourni pour %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Échec du sondage de %(name)s pendant %(cnt)s fois !" #, python-format msgid "Pollster for %s is disabled!" msgstr "Le sondeur pour %s est désactivé !" 
#, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "Empêcher le sondeur %(name)s d'interroger la source %(source)s !" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La longueur maximale de local_queue de l'éditeur est dépassée, suppression " "des %d échantillons les plus anciens" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "La stratégie de publication est inconnue (%s), forcée sur stratégie par " "défaut" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "La demande n'a pas pu se connecter à OpenDaylight avec l'API REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "Zone obligatoire %s non spécifiée" msgid "Resource" msgstr "Ressource" msgid "Sample" msgstr "Échantillon" msgid "Samples should be included in request body" msgstr "Des exemples doivent être inclus dans le corps de la demande" #, python-format msgid "Skip loading extension for %s" msgstr "Omettre le chargement de l'extension pour %s" #, python-format msgid "String %s is not a valid isotime" msgstr "La chaine %s n'est pas un isotime valide" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Fichier Yaml qui définit le mappage entre les échantillons et les ressources/" "métriques gnocchi" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Le type de données %(type)s n'est pas pris en charge. La liste de types de " "données pris en charge est la suivante : %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "La zone 'fields' est obligatoire pour %s" msgid "The path for the file publisher is required" msgstr "Le chemin de l'éditeur de fichier est obligatoire " #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP : Impossible de décoder les données envoyées par %s" msgid "UDP: Unable to store meter" msgstr "UDP: Impossible de stocker le compteur" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Impossible de se connecter au serveur de base de données : %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Impossible de convertir la valeur %(value)s dans le type de données attendu " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "Impossible de découvrir les ressources : %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Impossible d'évaluer l'expression %(expr)s : %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Impossible de charger l'éditeur %s " #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Impossible de télécharger l'inspecteur d'hyperviseur : %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Impossible de se reconnecter au serveur mongodb principal au bout de " "%(retries)d tentatives. Abandon." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." 
msgstr "" "Impossible de se reconnecter au serveur mongodb principal : %(errmsg)s. " "Nouvelle tentative dans %(retry_interval)d secondes." msgid "Unable to send sample over UDP" msgstr "Impossible d'envoyer l'échantillon sur UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exception inattendue lors de la conversion de %(value)s dans le type de " "donnée attendu %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Extension de découverte inconnue : %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Type de métadonnées inconnu. La clé (%s) ne peut pas être interrogée." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Statut %(stat)s inconnu reçu sur l'équilibreur de charge %(id)s, " "échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Statut %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Statut %(stat)s inconnu reçu sur le programme d'écoute %(id)s, échantillon " "ignoré" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "Statut %(stat)s inconnu reçu sur le membre %(id)s, échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "Statut %(stat)s inconnu reçu sur le pool %(id)s, échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Statut %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "Statut %(stat)s inconnu reçu sur le VPN %(id)s, échantillon ignoré" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "Machine virtuelle %s non trouvée dans VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s non trouvé dans XenServer" msgid "Wrong sensor type" msgstr "Type de détecteur erroné" msgid "XenAPI not installed" msgstr "XenAPI non installé" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Les URL d'alarmes ne sont pas disponibles lorsque Aodh est désactivé ou non " "disponible." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "ne peut pas obtenir le temps UC pour %(id)s : %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "L'option directe ne peut pas être définie sur vrai si Gnocchi est activé." #, python-format msgid "dropping out of time order sample: %s" msgstr "suppression de l'échantillon de classement temporel : %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "abandon de l'échantillon sans prédécesseur : %s" msgid "ipmitool output length mismatch" msgstr "Non concordance de longueur de la sortie ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes et backup_count doivent être des nombres." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "signature de message non valide, message ignoré : %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Échec de l'analyse des données du détecteur IPMI. Aucune donnée extraite à " "partir de l'entrée fournie" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "Échec de l'analyse des données du détecteur IPMI, type de détecteur inconnu" msgid "running ipmitool failure" msgstr "Échec d'exécution d'ipmitool" ceilometer-6.1.5/ceilometer/locale/it/0000775000567000056710000000000013072745164021030 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/it/LC_MESSAGES/0000775000567000056710000000000013072745164022615 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/it/LC_MESSAGES/ceilometer.po0000664000567000056710000004000513072744706025305 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Stefano Maffulli , 2013 # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-19 05:24+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s non trovato" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Il trasformatore aritmetico deve utilizzare almeno un contatore " "nell'espressione '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Impossibile creare la tabella %(table_name)s la tabella già esiste. " "Ignorare l'errore" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continua dopo errore da %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "Impossibile connettersi all'host slave: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Impossibile connettersi a XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Impossibile ricevere CPU Util per %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Impossibile ricevere l'Uso della Memoria per %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "Impossibile conoscere l'utilizzo CPU della VM %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Impossibile ottenere l'indirizzo IP dell'istanza %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "La destinazione del dispatcher non è stata impostata, nessun contatore verrà " "inviato. 
Impostare la destinazione nel file ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Errore durante l'analisi della risposta HTTP: %s" msgid "Error stopping pollster." msgstr "Errore durante l'arresto del sondaggio. " msgid "Event" msgstr "Evento" msgid "Expression evaluated to a NaN value!" msgstr "Espressione valutata a un valore NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Impossibile importare l'estensione per %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Impossibile ispezionare i dati dell'istanza = , " "stato dominio SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Impossibile ispezionare l'utilizzo della memoria da parte di " "%(instance_uuid)s, impossibile ottenere informazioni da libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Impossibile ispezionare l'utilizzo della memoria da parte dell'istanza = " ", impossibile ottenere informazioni da libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Impossibile caricare eventuali gestori di notifica per %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Impossibile analizzare il valore data/ora %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Impossibile pubblicare %d datapoint, eliminati" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" #, python-format msgid "Failed to record metering data: %s" msgstr "Impossibile registrare i dati di misurazione: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Espressione del filtro non valida: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "L'istanza %(name)s (%(instance_id)s) viene ignorata: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Si sta ignorando l'istanza %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Ignora loadbalancer %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Ignora pool %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " "%(line)s, colonna: %(column)s." 
#, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Periodo non valido %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "" "Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" msgid "Limit must be positive" msgstr "Il limite deve essere un positivo" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Più di un evento con id %s restituito dal driver di archiviazione" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Più VM %s trovate in XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "È necessario specificare connection_url e connection_password da utilizzare" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" msgid "Node Manager init failed" msgstr "Inizializzazione gestore nodi non riuscita" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "L'API OpenDaylitght ha restituito %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "L'API Opencontrail ha restituito %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Operatore %(operator)s non è supportato. Solo gli operatori di uguaglianza " "sono disponibili per il campo %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Operatore %(operator)s non è supportato. Gli operatori supportati sono: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "L'espressione ordina per non è valida: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " "%(err)s" msgid "Period must be positive." msgstr "Il periodo deve essere positivo" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s dopo errore da publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Continuare dopo errore da publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "Pipeline %(pipeline)s: errore durante lo scaricamento del trasformatore " "%(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Uscita dopo errore del trasformatore %(trans)s per " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster per %s disabilitato!" 
#, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Impedire al pollster %(name)s di eseguire il polling dell'origine %(source)s." #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La lunghezza local_queue massima del publisher è stata superata, " "eliminazione di esempi %d meno recenti" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "La politica di pubblicazione è sconosciuta (%s), applicazione del valore " "predefinito" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "Richiesta di collegamento a OpenDaylight con API NorthBound REST non riuscita" #, python-format msgid "Required field %s not specified" msgstr "Campo richiesto %s non specificato" msgid "Resource" msgstr "Risorsa" msgid "Sample" msgstr "Esempio" msgid "Samples should be included in request body" msgstr "I campioni devono essere inclusi nel corpo della richiesta " #, python-format msgid "Skip loading extension for %s" msgstr "Ignora caricamento dell'estensione per %s" #, python-format msgid "String %s is not a valid isotime" msgstr "La stringa %s non è un orario standard (isotime) valido" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Il file Yaml che definisce l'associazione tra i campioni e le risorse " "gnocchi/metriche" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " "supportati è: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Il campo 'fields' è obbligatorio per %s" msgid "The path for the file publisher is required" msgstr "Il percorso per il publisher di file è obbligatorio" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: impossibile decodificare i dati inviati da %s" msgid "UDP: Unable to store meter" msgstr "UDP: impossibile memorizzare il contatore" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Impossibile connettersi al server di database: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Impossibile convertire il valore %(value)s nel tipo di dati previsto " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "Impossibile rilevare le risorse: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Impossibile valutare l'espressione %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Impossibile caricare il publisher %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Impossibile caricare il programma di controllo hypervisor: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Impossibile riconnettersi al mongodb primario dopo %(retries)d tentativi. " "L'operazione viene interrotta." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Impossibile connettersi al mongodb primario: %(errmsg)s. 
Prossimo tentativo " "tra %(retry_interval)d secondi." msgid "Unable to send sample over UDP" msgstr "Impossibile inviare l'esempio su UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Eccezione non prevista durante la conversione di %(value)s per il tipo di " "dati previsto %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Estensione di rilevamento sconosciuta: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "" "Tipo di metadati sconosciuto. La chiave (%s) non potrà essere sottoposta a " "query." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su bilanciatore del carico %(id)s, " "ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su listener %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su membro %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto sul pool %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vip %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s non trovata in VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s non trovata in XenServer" msgid "Wrong sensor type" msgstr "Tipo di sensore errato" msgid "XenAPI not installed" msgstr "XenAPI non installato" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Errore YAML durante la lettura del file definizioni %(file)s" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "impossibile ricevere l'ora CPU per %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "L'opzione direct non può essere true quando Gnocchi è abilitato." #, python-format msgid "dropping out of time order sample: %s" msgstr "rilascio campione ordinamento fuori tempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "eliminazione in corso dell'esempio senza predecessore: %s" msgid "ipmitool output length mismatch" msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes e backup_count devono essere numeri." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "Firma messaggio non valida, eliminazione del messaggio: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " "dall'input fornito" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" msgid "running ipmitool failure" msgstr "errore nell'esecuzione ipmitool" ceilometer-6.1.5/ceilometer/locale/zh_CN/0000775000567000056710000000000013072745164021415 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000013072745164023202 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001121313072744706027577 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # Shuwen SUN , 2014 # OpenStack Infra , 2015. #zanata # Gaoxiao Zhu , 2016. #zanata # chunhan , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev17\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-06-17 10:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-30 02:28+0000\n" "Last-Translator: chunhan \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "无法加载检查器 %(name)s: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "无法为%(id)s获å–常驻内存使用é‡ï¼š%(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "分å‘器处ç†%så¤±è´¥ï¼Œé‡æ–°æŽ’队" msgid "Error connecting to coordination backend." msgstr "连接到å调器åŽç«¯å‡ºé”™" msgid "Error getting group membership info from coordination backend." msgstr "从å调器åŽç«¯èŽ·å–组æˆå‘˜ä¿¡æ¯æ—¶å‡ºé”™" #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "加入分组 %s出错,正在é‡è¯•" #, python-format msgid "Error loading meter definition : %(err)s" msgstr "加载计é‡çš„定义失败:%(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "处ç†äº‹ä»¶%s时出错,将被丢弃" msgid "Error sending a heartbeat to coordination backend." msgstr "å‘é€å¿ƒè·³ä¿¡æ¯è‡³å调器åŽç«¯å‡ºé”™" msgid "Fail to process a notification" msgstr "处ç†é€šçŸ¥å¤±è´¥" msgid "Fail to process notification" msgstr "处ç†é€šçŸ¥å¤±è´¥" msgid "Failed to connect to Gnocchi." 
msgstr "连接Gnocchi失败。" #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "连接KafkaæœåŠ¡å¤±è´¥ï¼š%s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "连接数æ®åº“失败,目的%(purpose)s ç¨åŽé‡è¯•:%(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "连接数æ®åº“失败,目的%(purpose)s ç¨åŽé‡è¯•:%(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "加载资æºå¤±è´¥å› ä¸ºé”™è¯¯%s" #, python-format msgid "Failed to record event: %s" msgstr "无法记录事件:%s" #, python-format msgid "Failed to record metering data: %s" msgstr "无法ä¿å­˜ç›‘控数æ®ï¼š%s" msgid "Failed to retry to send sample data with max_retry times" msgstr "å°è¯•å‘é€é‡‡æ ·æ•°æ®è¾¾åˆ°æœ€å¤§é‡è¯•次数åŽä»å¤±è´¥" msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "组ID:%{group_id}s,æˆå‘˜ï¼š%{members}s,当å‰ç”¨æˆ·%{me}s:当å‰ç”¨æˆ·ä¸åœ¨ç»„中,无法执" "行任务" #, python-format msgid "Invalid type %s specified" msgstr "被指定的类型%s无效" #, python-format msgid "Missing field %s" msgstr "缺少字段%s" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "传入的资æºå­—典必须包å«é”®resource_idå’Œresource_url" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "所需字段%(field)s应为%(type)s类型" #, python-format msgid "Required field %s not specified" msgstr "未指定所需字段 %s" #, python-format msgid "Required fields %s not specified" msgstr "必需字段%s未被指定" #, python-format msgid "Skip invalid resource %s" msgstr "跳过无效资æº%s" #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "跳过%(name)s,由keystone引起:%(exc)s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "状æ€ç ï¼š%{code}s。分å‘事件:%{event}s失败" #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "无法加载更改的事件管é“:%s" #, python-format msgid "Unable to load changed pipeline: %s" msgstr "无法加载更改的管é“:%s" #, python-format msgid "Unrecognized type value %s" msgstr "无法识别的类型值 %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "标识为%(ident)s的主机%(host)s检查器调用失败: %(err)s" ceilometer-6.1.5/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po0000664000567000056710000001225613072744706030123 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # OpenStack Infra , 2015. #zanata # chunhan , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev17\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-06-17 10:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-30 03:36+0000\n" "Last-Translator: chunhan \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" msgid "Can't connect to keystone, assuming aodh is disabled and retry later." msgstr "无法连接到keystone,请ä¿è¯gnocchi 未开å¯å†é‡è¯•" msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" msgstr "无法连接到keystone,请ä¿è¯gnocchi 未开å¯å†é‡è¯•" msgid "" "Cannot extract tasks because agent failed to join group properly. Rejoining " "group." 
msgstr "无法获å–任务,因为代ç†åŠ å…¥ç»„å¤±è´¥ï¼Œæ­£åœ¨é‡æ–°åŠ å…¥" #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " "%(exc)s" msgstr "" "采集器%(pollster)s无法为虚拟机%(instance_id)s采集数æ®ï¼Œéžè‡´å‘½é”™è¯¯ï¼š%(exc)s" #, python-format msgid "Dropping out of time order sample: %s" msgstr "正在丢弃过期的采样数æ®%s" #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "æ­£åœ¨ä¸¢å¼ƒæ²¡æœ‰ä¹‹å‰æ•°æ®çš„采样值:%s" #, python-format msgid "Duplicated values: %s found in CLI options, auto de-duplicated" msgstr "é‡å¤å€¼ï¼šå‘现控制å°å‚æ•°%s,自动去除é‡å¤é¡¹" #, python-format msgid "Failed to load any dispatchers for %s" msgstr "无法为%s加载任何分å‘器" #, python-format msgid "" "Failed to parse date from set fields, both fields %(start)s and %(end)s must " "be datetime: %(err)s" msgstr "" "ä»Žè®¾å®šçš„å€¼ä¸­æ— æ³•è§£æžæ—¶é—´å€¼ï¼Œ%(start)så’Œ%(end)s的值都应为日期时间:%(err)s" #, python-format msgid "Ignore unrecognized field %s" msgstr "无法识别字段%s" #, python-format msgid "Invalid status, skipping IP address %s" msgstr "ä¸å¯ç”¨çжæ€ï¼Œè·³è¿‡IP地å€%s" msgid "Negative delta detected, dropping value" msgstr "检测到负å˜åŒ–é‡ï¼Œæ­£åœ¨ä¸¢å¼ƒè¯¥å€¼" #, python-format msgid "No endpoints found for service %s" msgstr "%sæœåŠ¡çš„å…¥å£æœªæ‰¾åˆ°" msgid "" "Non-metric meters may be collected. It is highly advisable to disable these " "meters using ceilometer.conf or the pipeline.yaml" msgstr "" "éžè®¡é‡æŒ‡æ ‡ä¹Ÿè®¸ä¼šè¢«æ”¶é›†ã€‚强烈建议在ceilometer.conf或者pipeline.yaml中ç¦ç”¨è¿™äº›" "指标" #, python-format msgid "" "Skipping %(name)s, %(service_type)s service is not registered in keystone" msgstr "忽略%(name)s,因为%(service_type)s 类型的æœåŠ¡åœ¨keystone中未注册" #, python-format msgid "Skipping duplicate meter definition %s" msgstr "跳过é‡å¤çš„度é‡å®šä¹‰%s" msgid "" "Timedelta plugin is required two timestamp fields to create timedelta value." msgstr "æ—¶é—´å˜åŒ–æ’件需è¦ä¸¤ä¸ªæ—¶é—´æˆ³çš„值æ¥ç”Ÿæˆæ—¶é—´å˜åŒ–值" msgid "" "ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " "aodh endpoint." msgstr "" "在aodhå¯ç”¨æ—¶å¯åЍceilometer-apiæœåŠ¡ï¼Œå°†å¯¼è‡´è­¦å‘Šçš„URLs被é‡å®šå‘至aodh 端点" msgid "" "ceilometer-api started with gnocchi enabled. The resources/meters/samples " "URLs are disabled." 
msgstr "" "在gnocchi å¯ç”¨æ—¶å¯åЍceilometer-apiæœåŠ¡ï¼Œå°†å¯¼è‡´URLs为/meters/samples的资æºå°†ä¸" "å¯ç”¨" #, python-format msgid "event signature invalid, discarding event: %s" msgstr "äº‹ä»¶ç­¾åæ— æ•ˆï¼Œä¸¢å¼ƒäº‹ä»¶ï¼š%s" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "消æ¯ç­¾åä¸åˆæ³•,丢弃消æ¯ï¼š%r" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " "volume (volume: None), the sample will be dropped" msgstr "" "资æº%(resource_id)s在%(timestamp)s时间的监控数æ®%(counter_name)s 没有值(或为" "空),采样将被丢弃" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " "volume which is not a number (volume: %(counter_volume)s), the sample will " "be dropped" msgstr "" "资æº%(resource_id)s在%(timestamp)s时间的监控数æ®%(counter_name)s 有值当ä¸ä¸ºæ•°" "字(值为:%(counter_volume)s),采样将被丢弃" msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "pecan_debug无法被å¯ç”¨ï¼Œå¦‚æžœapi_workers>1,该值会被é‡ç½®ä¸ºFalse" #, python-format msgid "" "split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\" to your jsonpath instead" msgstr "" "分割æ’件已被废弃,在你的JSON路径中添加.`split(%(sep)s, %(segment)d, " "%(max_split)d)`æ¥ä»£æ›¿" #, python-format msgid "unable to configure oslo_cache: %s" msgstr "无法é…ç½®é…ç½®è§£æžæ¨¡å—缓存:%s" ceilometer-6.1.5/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001043413072744706027405 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # Lianhao Lu , 2014 # OpenStack Infra , 2015. #zanata # chunhan , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev17\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-06-17 10:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-30 03:01+0000\n" "Last-Translator: chunhan \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "%d events are removed from database" msgstr "%d个事件从数æ®åº“中被移除" #, python-format msgid "%d samples removed from database" msgstr "从数æ®åº“中移除%d个采样数æ®ã€‚" msgid "Configuration:" msgstr "é…ç½®" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "正在连接到%(nodelist)s节点的数æ®åº“%(db)s。" msgid "Coordination backend started successfully." msgstr "å调器åŽå°å¯åЍæˆåŠŸ" #, python-format msgid "Definitions: %s" msgstr "定义: %s" msgid "Detected change in pipeline configuration." msgstr "检测到管é“é…ç½®å‘生改å˜" #, python-format msgid "Dropping event data with TTL %d" msgstr "正在根æ®TTL: %d丢弃事件数æ®" #, python-format msgid "Dropping metering data with TTL %d" msgstr "正在根æ®TTL %d丢弃监控数æ®" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "检测到é‡å¤äº‹ä»¶ï¼Œæ­£åœ¨è·³è¿‡ï¼š%s" msgid "Expired residual resource and meter definition data" msgstr "剩余资æºå’Œè®¡é‡è¡¨å®šä¹‰æ•°æ®å·²è¿‡æœŸ" #, python-format msgid "Index %s will be recreate." msgstr "索引%så°†è¢«é‡æ–°åˆ›å»º" #, python-format msgid "Joined partitioning group %s" msgstr "已加入分组%s" #, python-format msgid "Left partitioning group %s" msgstr "已离开分组%s" #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." 
msgstr "é™åˆ¶å€¼æœªè¢«æä¾›ï¼Œç»“果集应被é™åˆ¶åˆ°%(limit)d." msgid "Nothing to clean, database event time to live is disabled" msgstr "事件数æ®åº“过期时间未激活,没有数æ®è¦æ¸…除" msgid "Nothing to clean, database metering time to live is disabled" msgstr "监控数æ®åº“过期时间未激活,没有数æ®è¦æ¸…除" #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "管é“%(pipeline)sï¼šä½¿ç”¨å‚æ•°%(param)såˆå§‹åŒ–å˜å½¢å™¨å®žä¾‹%(name)s" #, python-format msgid "Pipeline config: %s" msgstr "管é“é…置:%s" msgid "Pipeline configuration file has been updated." msgstr "管é“é…置文件已被更新" #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "在上下文环境%(src)s执行采集器%(poll)s" #, python-format msgid "Publishing policy set to %s" msgstr "设置å‘布策略为%s" msgid "Reconfiguring polling tasks." msgstr "釿–°é…置轮询任务" msgid "Reloading notification agent and listeners." msgstr "釿–°åŠ è½½é€šçŸ¥ä»£ç†å’Œjianting" #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "跳过采集器%(name)s,本周期无法找到上下文为%(p_context)s的资æº" #, python-format msgid "Starting server in PID %s" msgstr "正在å¯åЍæœåŠ¡è¿›ç¨‹PID %s" #, python-format msgid "Swift endpoint not found: %s" msgstr "对象存储的端点组:%s未被找到" msgid "detected decoupled pipeline config format" msgstr "检测到分离的管é“é…置格å¼" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "%(resource_id)s的资æºç›‘控数æ®%(counter_name)s 值为:%(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "" "æœåŠ¡åœ¨0.0.0.0:%(sport)s上è¿è¡Œï¼Œå¯ä»¥é€šè¿‡http://127.0.0.1:%(vport)sæ¥æŸ¥çœ‹" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "æœåŠ¡åœ¨http://%(host)s:%(port)s上è¿è¡Œ" ceilometer-6.1.5/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po0000664000567000056710000003576413072744706025712 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # yelu , 2013 # Tom Fifield , 2013 # 颜海峰 , 2014 # yelu , 2013 # Yu Zhang, 2013 # Yu Zhang, 2013 # 颜海峰 , 2014 # English translations for ceilometer. # Linda , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-29 04:40+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 3.7.3\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "无法找到 %(entity)s %(id)s " #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "算术å˜å½¢å™¨åœ¨è¡¨è¾¾å¼â€œ%sâ€ä¸­å¿…须至少使用一个指标" #, python-format msgid "Cannot create table %(table_name)s it already exists. 
Ignoring error" msgstr "无法创建表 %(table_name)s,因为它已存在。忽略此错误" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "%(name)s 出错:%(error)s,继续执行" #, python-format msgid "Could not connect slave host: %s " msgstr "无法连接伺æœä¸»æœºï¼š%s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "无法连接到 XenAPI:%s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "æ— æ³•èŽ·å– %(id)s çš„ CPU 使用率:%(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "æ— æ³•èŽ·å– %(id)s 的内存使用情况:%(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "æ— æ³•èŽ·å– VM %s CPU 使用率" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "无法获å–实例 %s çš„ IP 地å€" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "未设置分å‘器目标,将ä¸ä¼šå‘å¸ƒæµ‹é‡æ•°æ®ã€‚请在 ceilometer.conf 文件中设置目标。" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在删除通知 %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查找实例 时,libvirt 中出错:[é”™è¯¯ä»£ç  " "%(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "è§£æž HTTP å“应失败:%s" msgid "Error stopping pollster." msgstr "åœæ­¢è½®è¯¢ç¨‹åºæ—¶å‡ºé”™ã€‚" msgid "Event" msgstr "事件" msgid "Expression evaluated to a NaN value!" msgstr "表达å¼è®¡ç®—结果为 NaN 值ï¼" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "未能导入 %(name)s 的扩展:%(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "未能检查到实例 的数æ®ï¼ŒåŸŸçжæ€ä¸º SHUTOFF。" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "未能检查到 %(instance_uuid)s 的内存使用情况,无法从 libvirt 获å–ä¿¡æ¯ï¼š" "%(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "未能检查到实例 的内存使用情况,无法从 libvirt 获å–" "ä¿¡æ¯ã€‚" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "无法为 %s 加载任何通知处ç†å™¨" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "æœªèƒ½è§£æžæ—¶é—´æˆ³è®°å€¼ %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "未能å‘布 %d 个数æ®ç‚¹ï¼Œæ­£åœ¨å°†å…¶åˆ é™¤" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "未能å‘布 %d 个数æ®ç‚¹ï¼Œå°†å…¶å…¥é˜Ÿ" #, python-format msgid "Failed to record metering data: %s" msgstr "æ— æ³•è®°å½•æµ‹é‡æ•°æ®ï¼š%s" #, python-format msgid "Filter expression not valid: %s" msgstr "è¿‡æ»¤è¡¨è¾¾å¼æ— æ•ˆï¼š%s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "忽略实例 %(name)s (%(instance_id)s):%(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "忽略实例 %(name)s:%(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "正在忽略负载å‡è¡¡å™¨ %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "正在忽略池 %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
msgstr "" "定义文件 %(file)s 的第 %(line)s 行和第 %(column)s 列中有无效 YAML 语法。" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "无效的时间段 %(period)s:%(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "ç‰¹å¾ %(trait)s 包å«äº†æ— æ•ˆæ³•的特å¾ç±»åž‹â€œ%(type)s†" msgid "Limit must be positive" msgstr "limit 必须是正数" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "从存储库驱动程åºè¿”回了多个标识为 %s 的事件" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "在 XenServer 中找到多个 VM %s" msgid "Must specify connection_url, and connection_password to use" msgstr "使用时必须指定 connection_url å’Œ connection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "未对 %(name)s æä¾›å为 %(plugin)s çš„æ’ä»¶" msgid "Node Manager init failed" msgstr "节点管ç†å™¨åˆå§‹åŒ–失败" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "未授æƒè®¿é—® %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylitght API è¿”å›žäº†çŠ¶æ€ %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API 返回了 %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "è¿ç®—符 %(operator)s ä¸å—支æŒã€‚对于字段 %(field)s,åªèƒ½ä½¿ç”¨ç­‰å·è¿ç®—符" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "è¿ç®—符 %(operator)s ä¸å—支æŒã€‚å—æ”¯æŒçš„è¿ç®—符为:%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "排åºä¾æ®è¡¨è¾¾å¼æ— æ•ˆï¼š%s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "%(name)s çš„ JSONPath 规范“%(jsonpath)sâ€ä¸­å­˜åœ¨è§£æžé”™è¯¯ï¼š%(err)s" msgid "Period must be positive." msgstr "时间段必须是正数。" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "ç®¡é“ %(pipeline)s:å‘å¸ƒç¨‹åº %(pub)s 中å‘生错误之åŽï¼Œå¤„于 %(status)s 状æ€" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "ç®¡é“ %(pipeline)s:å‘布器 %(pub)s 出错,继续执行" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "ç®¡é“ %(pipeline)s:清空å˜å½¢å™¨ %(trans)s 时出错" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "ç®¡é“ %(pipeline)s:%(smp)s çš„å˜å½¢å™¨%(trans)s 出错,退出" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "指定了æ’件,但没有为 %s æä¾›æ’ä»¶å" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "轮询 %(mtr)s 传感器已失败 %(cnt)s 次ï¼" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "轮询 %(name)s 已失败 %(cnt)s 次ï¼" #, python-format msgid "Pollster for %s is disabled!" msgstr "å·²ç¦ç”¨ %s 的采集器ï¼" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" 
msgstr "è¯·é˜»æ­¢è½®è¯¢ç¨‹åº %(name)s å†è½®è¯¢æº %(source)sï¼" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "超过å‘布器的本地队列最大长度,正在删除最旧的 %d 个样本" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "未知的å‘布策略 (%s),强制使用缺çœå€¼" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API 返回了 %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "请求无法连接到带有 NorthBound REST API çš„ OpenDaylight" #, python-format msgid "Required field %s not specified" msgstr "未指定所需字段 %s" msgid "Resource" msgstr "资æº" msgid "Sample" msgstr "样本" msgid "Samples should be included in request body" msgstr "样本应包括在请求主体中" #, python-format msgid "Skip loading extension for %s" msgstr "跳过 %s 的加载扩展" #, python-format msgid "String %s is not a valid isotime" msgstr "字符串 %s 是无效的标准时间格å¼" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "定义样本与 gnocchi 资æº/度é‡å€¼ä¹‹é—´çš„æ˜ å°„çš„ Yaml 文件" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "æ•°æ®ç±»åž‹ %(type)s ä¸å—支æŒã€‚å—æ”¯æŒçš„æ•°æ®ç±»åž‹åˆ—表是:%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需è¦å­—段“fieldsâ€" msgid "The path for the file publisher is required" msgstr "éœ€è¦æ–‡ä»¶å‘布器的路径" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP:无法解ç ç”± %s å‘é€çš„æ•°æ®" msgid "UDP: Unable to store meter" msgstr "UDP:无法存储计é‡å™¨" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "无法连接到数æ®åº“æœåŠ¡å™¨ï¼š%(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "无法将值 %(value)s 转æ¢ä¸ºé¢„期的数æ®ç±»åž‹ %(type)s。" #, python-format msgid "Unable to discover resources: %s" msgstr "无法å‘现资æºï¼š%s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "æ— æ³•è®¡ç®—è¡¨è¾¾å¼ %(expr)s:%(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "无法加载å‘布器 %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "无法加载管ç†ç¨‹åºçš„æŽ¢æµ‹å™¨ï¼š%s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "在 %(retries)d 次å°è¯•åŽä»æ— æ³•é‡è¿žåˆ°ä¸»è¦ MongoDB。放弃é‡è¿žã€‚" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "无法é‡è¿žåˆ°ä¸»è¦ MongoDB:%(errmsg)s。在 %(retry_interval)d ç§’åŽè¿›è¡Œé‡è¯•。" msgid "Unable to send sample over UDP" msgstr "无法通过 UDP å‘逿 ·æœ¬" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "å°† %(value)s 转æ¢ä¸ºé¢„期的数æ®ç±»åž‹ %(type)s æ—¶å‘生了æ„外的异常。" #, python-format msgid "Unknown discovery extension: %s" msgstr "未知的å‘现扩展:%s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." 
msgstr "未知的元数æ®ç±»åž‹ã€‚é”® (%s) 将无法进行查询。" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在负载å‡è¡¡å™¨ %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "在 fw %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在侦å¬å™¨ %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在æˆå‘˜ %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "在池 %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "在 vip %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s ä¸ŠæŽ¥æ”¶åˆ°æœªçŸ¥çŠ¶æ€ %(stat)s,正在跳过样本" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "在 VMware vSphere 中未找到 VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "在 XenServer 中未找到 VM %s" msgid "Wrong sensor type" msgstr "错误的传感器类型" msgid "XenAPI not installed" msgstr "没有安装 XenAPI" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "读å–定义文件 %(file)s æ—¶é‡åˆ° YAML 错误" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "Aodh 被ç¦ç”¨æˆ–ä¸å¯ç”¨æ—¶ï¼Œè­¦æŠ¥ URL ä¸å¯ç”¨ã€‚" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "æ— æ³•èŽ·å– %(id)s çš„ CPU 时间:%(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "å¯ç”¨ Gnocchi åŽï¼Œdirect 选项ä¸èƒ½ä¸º true。" #, python-format msgid "dropping out of time order sample: %s" msgstr "æ­£åœ¨é€€å‡ºæ—¶é—´é¡ºåºæ ·æœ¬ï¼š%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "æ­£åœ¨åˆ é™¤æ²¡æœ‰å…ˆå‰æ•°æ®çš„æ ·æœ¬ï¼š%s" msgid "ipmitool output length mismatch" msgstr "ipmitool 输出长度ä¸åŒ¹é…" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes å’Œ backup_count 应该是数字。" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "消æ¯ç­¾å无效,丢弃消æ¯ï¼š%r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "è§£æž IPMI 传感器数æ®å¤±è´¥ï¼Œä»Žç»™å®šçš„输入中无法检索到数æ®" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "è§£æž IPMI 传感器数æ®å¤±è´¥ï¼ŒæœªçŸ¥çš„传感器类型" msgid "running ipmitool failure" msgstr "è¿è¡Œ ipmitool 时失败" ceilometer-6.1.5/ceilometer/locale/ja/0000775000567000056710000000000013072745164021006 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ja/LC_MESSAGES/0000775000567000056710000000000013072745164022573 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po0000664000567000056710000004404613072744706025274 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Tomoyuki KATO , 2013 # Akihiro Motoki , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Tsutomu Kimura , 2016. #zanata # 笹原 昌美 , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-28 11:41+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "演算変æ›ãƒ—ログラムã¯ã€å¼ '%s' ã§å°‘ãªãã¨ã‚‚ 1 ã¤ã®ãƒ¡ãƒ¼ã‚¿ãƒ¼ã‚’使用ã™ã‚‹å¿…è¦ãŒã‚り" "ã¾ã™" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "テーブル %(table_name)s ã¯æ—¢ã«å­˜åœ¨ã™ã‚‹ãŸã‚ã€ä½œæˆã§ãã¾ã›ã‚“。エラーを無視ã—ã¾" "ã™" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "%(name)s ã‹ã‚‰ã®ã‚¨ãƒ©ãƒ¼ã®å¾Œã§ç¶šè¡Œã—ã¾ã™: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "ã‚¹ãƒ¬ãƒ¼ãƒ–ãƒ›ã‚¹ãƒˆã«æŽ¥ç¶šã§ãã¾ã›ã‚“ã§ã—ãŸ: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI ã«æŽ¥ç¶šã§ãã¾ã›ã‚“ã§ã—ãŸ: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "%(id)s ã® CPU 使用率をå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "%(id)s ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é‡ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "VM %s ã®CPU 使用率をå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "インスタンス %s ã® IP アドレスをå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "ディスパッãƒãƒ£ãƒ¼ã‚¿ãƒ¼ã‚²ãƒƒãƒˆãŒè¨­å®šã•れã¦ãŠã‚‰ãšã€ãƒ¡ãƒ¼ã‚¿ãƒ¼ã¯é€šçŸ¥ã•れã¾ã›ã‚“。" "ceilometer.conf ファイルã§ã‚¿ãƒ¼ã‚²ãƒƒãƒˆã‚’設定ã—ã¦ãã ã•ã„。" #, fuzzy, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "通知 %(type)s を除去ã—ã¦ã„ã¾ã™ (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "インスタンス ã®æ¤œç´¢ä¸­ã« libvirt ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾" "ã—ãŸ: [エラーコード %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "HTTP 応答を解æžã—ã¦ã„ã‚‹éš›ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %s" msgid "Error stopping pollster." msgstr "pollster åœæ­¢ã‚¨ãƒ©ãƒ¼ã€‚" msgid "Event" msgstr "イベント" msgid "Expression evaluated to a NaN value!" msgstr "å¼ãŒ NaN 値ã«è©•価ã•れã¾ã—ãŸã€‚" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "%(name)s ã®æ‹¡å¼µæ©Ÿèƒ½ã®ã‚¤ãƒ³ãƒãƒ¼ãƒˆã«å¤±æ•—ã—ã¾ã—ãŸ: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "インスタンス ã®ãƒ‡ãƒ¼ã‚¿ã‚’検査ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚ドメ" "イン状態㯠SHUTOFF ã§ã™ã€‚" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "%(instance_uuid)s ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨çжæ³ã‚’検査ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚libvirt ã‹ã‚‰æƒ…å ±" "ã‚’å–å¾—ã§ãã¾ã›ã‚“: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "インスタンス のメモリー使用状況を検査できませんで" "した。libvirt から情報を取得できません。" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "%s の通知ハンドラーをロードできませんでした" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "タイムスタンプ値 %s を解析できませんでした" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d データポイントの公開に失敗しました。これらは廃棄されます" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d データポイントの公開に失敗しました。これらをキューに入れてください" #, python-format msgid "Failed to record metering data: %s" msgstr "計測データを記録できませんでした: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "フィルター式が無効です: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "インスタンス %(name)s (%(instance_id)s) を無視しています: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "インスタンス %(name)s を無視しています: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "ロードバランサー %(loadbalancer_id)s を無視しています" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "プール %(pool_id)s を無視しています" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "%(line)s 行目の %(column)s 列で定義ファイル %(file)s の YAML 構文 が無効で" "す。" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "無効な期間 %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特性 %(trait)s の特性タイプ '%(type)s' が無効です" msgid "Limit must be positive" msgstr "上限は正の値でなければなりません" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "ストレージドライバーから id %s のイベントが複数返されました" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "複数の VM %s が XenServer に見つかりました" msgid "Must specify connection_url, and connection_password to use" msgstr "" "connection_url と、使用する connection_password を指定する必要があります" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)s に使用できる %(plugin)s という名前のプラグインがありません" msgid "Node Manager init failed" msgstr "ノードマネージャーの初期化に失敗しました" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)s にアクセスする権限がありません" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylitght API から %(status)s %(reason)s が返されました" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API から %(status)s %(reason)s が返されました" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "演算子 %(operator)s はサポートされていません。フィールド %(field)s で使用でき" "るのは等価演算子のみです。" #, python-format msgid "" "Operator %(operator)s is not supported. 
The supported operators are: " "%(supported)s" msgstr "" "演算子 %(operator)s はサポートされていません。サポートされている演算子は " "%(supported)s です。" #, python-format msgid "Order-by expression not valid: %s" msgstr "order-by 式が無効です: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "%(name)s に関する JSONPath の指定 '%(jsonpath)s' のエラーを解析します: " "%(err)s" msgid "Period must be positive." msgstr "期間は正の数でなければなりません。" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの発生後の " "%(status)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "" "パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの後で続行しま" "す" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "パイプライン %(pipeline)s: 変換プログラム %(trans)s をフラッシュするときにエ" "ラーが発生しました" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "パイプライン %(pipeline)s: %(smp)s について変換プログラム %(trans)s からエ" "ラーが発生した後に終了します" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "プラグインが指定されていますが、%s にプラグイン名が提供されていません" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "センサー %(mtr)s のポーリングが %(cnt)s 回失敗しました" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "ポーリング %(name)s が %(cnt)s 回失敗しました" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s の pollster が無効になっています" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "pollster %(name)s がこれ以上ソース %(source)s をポーリングしないようにしてく" "ださい" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "パブリッシャー local_queue 最大長を超えました。古い方から %d 個のサンプルを除" "去します" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "パブリッシュポリシーが不明です (%s)。強制的にデフォルトに設定されます" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API から %(status)s %(reason)s が返されました" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "NorthBound REST API を使用した OpenDaylight への接続要求が失敗しました" #, python-format msgid "Required field %s not specified" msgstr "必須フィールド %s が指定されていません" msgid "Resource" msgstr "リソース" msgid "Sample" msgstr "サンプル" msgid "Samples should be included in request body" msgstr "サンプルは要求本文に含まれる必要があります" #, python-format msgid "Skip loading extension for %s" msgstr "%s の拡張機能のロードをスキップします" #, python-format msgid "String %s is not a valid isotime" msgstr "文字列 %s は無効な isotime です" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "サンプルと gnocchi のリソース/メトリクス間のマッピングを定義する Yaml ファイ" "ル" #, python-format msgid "" "The data type %(type)s is not supported. 
The supported data type list is: " "%(supported)s" msgstr "" "データ型 %(type)s はサポートされていません。サポートされているデータ型のリス" "ト: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s にはフィールド 'fields' が必要です" msgid "The path for the file publisher is required" msgstr "ファイルパブリッシャーのパスが必要です" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: %s から送信されたデータをデコードできません" msgid "UDP: Unable to store meter" msgstr "UDP: メーターを保存できません" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "データベースサーバーに接続できません: %(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "値 %(value)s を、想定されるデータ型 %(type)s に変換できません。" #, python-format msgid "Unable to discover resources: %s" msgstr "リソースを検出できません: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "式 %(expr)s を評価できません: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "パブリッシャー %s をロードできません" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "ハイパーバイザーインスペクターをロードできません: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "%(retries)d 回の再試行後、1 次 mongodb に再接続できません。中止します。" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "プライマリー mongodb に再接続できません: %(errmsg)s。%(retry_interval)d 秒以" "内に再試行します。" msgid "Unable to send sample over UDP" msgstr "UDP 経由でサンプルを送信できません" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)s を想定されるデータ型 %(type)s に変換する際に、想定しない例外が発生" "しました。" #, python-format msgid "Unknown discovery extension: %s" msgstr "不明なディスカバリーエクステンション: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." 
msgstr "不明なメタデータ種別です。キー (%s) は照会不可になります。" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "ロードバランサー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" "キップします" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "ファイアウォール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" "キップします" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "リスナー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" "す" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "メンバー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" "す" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "プール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "仮想 IP %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "vpn %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VMware vSphere で VM %s が見つかりません" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s が XenServer に見つかりません" msgid "Wrong sensor type" msgstr "センサー種別が正しくありません" msgid "XenAPI not installed" msgstr "XenAPI がインストールされていません" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "定義ファイル %(file)s での読み取りの YAML エラー" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Aodh が無効化されるか使用不可の場合、URL が使用できないことを警告します。" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "%(id)s の CPU 時間を取得できませんでした: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "Gnocchi を有効化した場合は、direct オプションを True に設定することはできませ" "ん。" #, python-format msgid "dropping out of time order sample: %s" msgstr "期限切れのオーダーサンプルを廃棄しています: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "サンプル (先行なし) を廃棄しています: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool 出力の長さが一致しません" msgid "max_bytes and backup_count should be numbers." 
msgstr "max_bytes と backup_count は数値でなければなりません。" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "メッセージシグニチャーが無効です。メッセージを破棄します: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI センサーデータの解析に失敗しました。指定された入力からデータが取得されま" "せんでした" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI センサーデータの解析に失敗しました。不明なセンサー種別です。" msgid "running ipmitool failure" msgstr "ipmitool の実行に失敗しました" ceilometer-6.1.5/ceilometer/locale/pt_BR/0000775000567000056710000000000013072745164021422 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000013072745164023207 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po0000664000567000056710000003775313072744706025707 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Carlos Marques , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-05-03 10:38+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s Não Encontrada" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "O transformador aritmético deve usar pelo menos um medidor na expressão '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Não é possível criar a tabela %(table_name)s; ela já existe. Ignorando o erro" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continuar após erro de %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "Não foi possível conectar-se ao host escravo: %s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Não foi possível conectar-se ao XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Não foi possível obter o Uso de CPU para %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Não foi possível obter o Uso de Memória para %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "Não foi possível obter a utilização de CPU da máquina virtual %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Não foi possível obter o endereço IP da instância %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "O destino do despachante não foi configurado, nenhum medidor será postado. 
" "Defina o destino no arquivo ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao consultar instância : [Código " "de Erro %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Erro ao analisar a resposta de HTTP: %s" msgid "Error stopping pollster." msgstr "Erro ao parar o pesquisador. " msgid "Event" msgstr "Evento" msgid "Expression evaluated to a NaN value!" msgstr "Expressão avaliada para um valor NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Falha ao importar extensão para %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Falha ao inspecionar os dados da instância , " "estado do domínio é SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Falha ao inspecionar o uso da memória de %(instance_uuid)s, não é possível " "obter informações a partir de libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Falha ao inspecionar o uso da memória da instância , não é possível obter informações a partir de libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Falha ao carregar qualquer manipulador de notificações para %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Falha ao analisar o valor do registro de data e hora %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Falha ao publicar %d pontos de dados, descartando-os" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Falha ao publicar %d pontos de dados, enfileire-os" #, python-format msgid "Failed to record metering data: %s" msgstr "Falha ao gravar dados de medição: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expressão de filtro inválida: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Ignorando a instância %(name)s (%(instance_id)s): %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Ignorando a instância %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Ignorando loadbalancer %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Ignorando conjunto%(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxe YAML inválida no arquivo Definitions %(file)s na linha: %(line)s, " "coluna: %(column)s." 
#, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Período inválido %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" msgid "Limit must be positive" msgstr "O limite deve ser positivo" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "" "Mais de um evento com o ID %s retornado a partir do driver de armazenamento" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Várias máquinas virtuais %s localizadas no XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "connection_url e connection_password devem ser especificados para uso" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nenhum plug-in nomeado %(plugin)s disponível para %(name)s" msgid "Node Manager init failed" msgstr "A inicialização do Gerenciador de Nós falhou" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Não Autorizado a acessar %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "API OpenDaylitght retornou %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "A API Opencontrail retornou%(status)s%(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "O operador %(operator)s não é suportado. Somente o operador de igualdade " "está disponível para o campo %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "O operador %(operator)s não é suportado. Os operadores suportados são: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Expressão solicitada inválida: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " "%(err)s" msgid "Period must be positive." msgstr "O período deve ser positivo." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s após erro do publicador %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Continuar após erro do publicador %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "Pipeline %(pipeline)s: Erro ao limpar o transformador %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Sair após erro do transformador %(trans)s para %(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "O sensor de pesquisa %(mtr)s falhou %(cnt)s vezes!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" #, python-format msgid "Pollster for %s is disabled!" msgstr "O pesquisador para %s está desativado!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" 
msgstr "Impeça o pesquisador %(name)s de sondar a origem %(source)s novamente!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "O comprimento máximo de local_queue do publicador foi excedido, descartando " "%d amostras mais antigas" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "A política de publicação é desconhecida (%s), forçar para o padrão" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "A solicitação falhou ao conectar-se ao OpenDaylight com a API REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "Campo obrigatório %s não especificado" msgid "Resource" msgstr "Recurso" msgid "Sample" msgstr "Amostra" msgid "Samples should be included in request body" msgstr "As amostras devem ser incluídas no corpo da solicitação" #, python-format msgid "Skip loading extension for %s" msgstr "Ignorar a extensão de carregamento para %s" #, python-format msgid "String %s is not a valid isotime" msgstr "A sequência %s não é um isotime válido" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "O arquivo Yaml que define o mapeamento entre amostras e recursos/métricas " "gnocchi" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "O tipo de dados %(type)s não é suportado. A lista de tipos de dados " "suportados é: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "O campo 'fields' é necessário para %s" msgid "The path for the file publisher is required" msgstr "O caminho para o publicador do arquivo é necessário" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: Não é possível decodificar dados enviados pelo %s" msgid "UDP: Unable to store meter" msgstr "UDP: Não é possível armazenar o medidor" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Não é possível conectar-se ao servidor de banco de dados: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Não é possível converter o valor %(value)s no tipo de dados esperado " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "Não é possível descobrir recursos: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Não é possível avaliar expressão %(expr)s:%(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Não é possível carregar o publicador %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Não é possível carregar o inspetor do hypervisor: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Não é possível reconectar-se ao mongodb primário após %(retries)d novas " "tentativas. Desistindo." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Não é possível reconectar-se ao mongodb primário: %(errmsg)s. Tentando " "novamente em %(retry_interval)d segundos." 
msgid "Unable to send sample over UDP" msgstr "Não é possível enviar a amostra sobre UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exceção inesperada ao converter %(value)s no tipo de dados esperado %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Extensão de descoberta desconhecida: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Tipo de metadados desconhecido. A chave (%s) não será consultável." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no Balanceador de Carga %(id)s, " "ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no fw %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no listener %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no membro %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no conjunto %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido em vip %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido recebido no vpn %(id)s, ignorando a " "amostra" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "Máquina virtual %s não localizada no VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Máquina virtual %s não localizada no XenServer" msgid "Wrong sensor type" msgstr "Tipo de sensor errado" msgid "XenAPI not installed" msgstr "XenAPI não instalado" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erro YAML ao ler o arquivo Definitions %(file)s" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "A URL de alarmes está indisponível quando o Aodh está desativado ou " "indisponível." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "não foi possível obter o tempo de CPU para %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "A opção direta não pode ser true quando o Gnocchi está ativado. " #, python-format msgid "dropping out of time order sample: %s" msgstr "eliminando amostra de pedido fora do prazo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "descartando amostra sem predecessor: %s" msgid "ipmitool output length mismatch" msgstr "incompatibilidade no comprimento da saída do ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes e backup_count devem ser números." #, python-format msgid "message signature invalid, discarding message: %r" msgstr "assinatura de mensagem inválida, descartando mensagem: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "A análise dos dados do sensor IPMI falhou. 
Nenhum dado recuperado da entrada " "fornecida" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "A análise dos dados do sensor IPMI falhou, tipo de sensor desconhecido" msgid "running ipmitool failure" msgstr "falha ao executar o ipmitool" ceilometer-6.1.5/ceilometer/locale/ru/0000775000567000056710000000000013072745164021042 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ru/LC_MESSAGES/0000775000567000056710000000000013072745164022627 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po0000664000567000056710000004745713072744706025331 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-10 08:01+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s не найден" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Арифметический преобразователь должен использовать хотя бы один счетчик в " "выражении %s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Не удалось создать таблицу %(table_name)s: уже существует. Игнорирование " "ошибки" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Продолжить после ошибки с %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "Не удалось подключиться к подчиненному хосту: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Не удалось подключиться к XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "" "Не удалось получить информацию об использовании процессора для %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "" "Не удалось получить информацию об использовании памяти для %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "" "Не удалось получить информацию об использовании CPU для виртуальной машины %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Не удалось получить IP-адрес экземпляра %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "Целевой объект диспетчера не задан, функция измерения не будет опубликована. 
" "Укажите целевой объект в файле ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Удаление уведомления %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Возникла ошибка в libvirt при поиске экземпляра <имя=%(name)s, ИД=%(id)s>: " "[Код ошибки: %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Ошибка анализа ответа HTTP: %s" msgid "Error stopping pollster." msgstr "Ошибка остановки опрашивающего объекта." msgid "Event" msgstr "Событие" msgid "Expression evaluated to a NaN value!" msgstr "Результат вычисления выражения - значение NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Не удалось импортировать расширение для %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Не удалось проверить данные экземпляра <имя=%(name)s, ИД=%(id)s>, состояние " "домена - SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Не удалось проверить использование памяти экземпляром %(instance_uuid)s, не " "удалось получить информацию от libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Не удалось проверить использование памяти экземпляром <имя=%(name)s, ИД=" "%(id)s>, не удалось получить информацию от libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Не удалось загрузить обработчики уведомлений для %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Ошибка анализа значения времени %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Не удалось опубликовать %d точек данных, выполняется их удаление" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Не удалось опубликовать %d точек данных, создайте для них очередь" #, python-format msgid "Failed to record metering data: %s" msgstr "Не удалось записать данные измерений: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Недопустимое выражение фильтра: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Игнорирование экземпляра %(name)s (%(instance_id)s) : %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Игнорирование экземпляра %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Балансировщик нагрузки %(loadbalancer_id)s игнорируется" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Пул %(pool_id)s игнорируется" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Недопустимый синтаксис YAML в файле определений %(file)s; строка: %(line)s, " "столбец: %(column)s." 
#, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Недопустимый интервал %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Недопустимый тип особенности %(type)s для особенности %(trait)s" msgid "Limit must be positive" msgstr "Ограничение должно быть положительным" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Из драйвера хранилища возвращено несколько событий с ИД %s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Найдено несколько виртуальных машин %s в XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "Необходимо указать connection_url и connection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Нет доступного модуля %(plugin)s для %(name)s" msgid "Node Manager init failed" msgstr "Сбой инициализации администратора узлов" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Нет прав доступа к %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "Функция API OpenDaylight вернула %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "API Opencontrail возвратил %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Оператор %(operator)s не поддерживается. Для поля %(field)s возможен только " "оператор равенства" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Оператор %(operator)s не поддерживается. Поддерживаемые операторы: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Недопустимое выражение сортировки: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Ошибка анализа спецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" msgid "Period must be positive." msgstr "Период должен быть положительным." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Конвейер %(pipeline)s: %(status)s после ошибки от публикатора %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Конвейер %(pipeline)s: Продолжение после ошибки из публикатора %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "Конвейер %(pipeline)s: Ошибка выгрузки преобразователя %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Конвейер %(pipeline)s: Выход после ошибки из преобразователя %(trans)s для " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Указан модуль, но не передано имя модуля для %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "%(cnt)s-кратный сбой датчика опроса %(mtr)s!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Опрос %(name)s не удалось выполнить %(cnt)s раз." #, python-format msgid "Pollster for %s is disabled!" msgstr "Опрашивающий объект для %s выключен!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" 
msgstr "" "Сделайте так, чтобы опрашивающий объект %(name)s больше не опрашивал " "источник %(source)s!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Превышена максимальная длина local_queue публикатора, удаление %d самых " "старых образцов" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Стратегия публикации неизвестна (%s). По умолчанию принудительная" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "Функция API RGW AdminOps вернула %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "Сбой запроса на подключение к OpenDaylight с API REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "Не указано обязательное поле %s" msgid "Resource" msgstr "Ресурс" msgid "Sample" msgstr "Образец" msgid "Samples should be included in request body" msgstr "Образцы должны включаться в тело запроса" #, python-format msgid "Skip loading extension for %s" msgstr "Пропустить загрузку расширения для %s" #, python-format msgid "String %s is not a valid isotime" msgstr "Строка %s не является допустимым значением isotime" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Файл Yaml, определяющий связи между образцами и ресурсами gnocchi " "(показателями)" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Тип данных %(type)s не поддерживается. Список поддерживаемых типов данных: " "%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Поле 'fields' является обязательным для %s" msgid "The path for the file publisher is required" msgstr "Требуется путь для публикатора файлов" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: не удается декодировать данные, отправленные %s" msgid "UDP: Unable to store meter" msgstr "UDP: не удалось сохранить счетчик" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Не удалось подключиться к серверу базы данных: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Преобразовать значение %(value)s в ожидаемый тип данных %(type)s невозможно." #, python-format msgid "Unable to discover resources: %s" msgstr "Не удалось найти ресурсы: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Вычислить выражение %(expr)s невозможно: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Не удалось загрузить публикатор %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Не удалось загрузить инспектор гипервизора: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Не удалось повторно подключиться к основной базе данных mongodb после " "%(retries)d попыток. Дальнейшие попытки прекращены." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Не удалось повторно подключиться к основной mongodb: %(errmsg)s. Повторное " "подключение через %(retry_interval)d секунд." 
msgid "Unable to send sample over UDP" msgstr "Не удалось отправить образец по UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Возникла непредвиденная исключительная ситуация при преобразовании %(value)s " "в ожидаемый тип данных %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Неизвестное расширение поиска: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Неизвестный тип метаданных. Ключ (%s) нельзя будет запрашивать." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "В балансировщике нагрузки %(id)s получено неизвестное состояние %(stat)s, " "пример пропускается" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "В fw %(id)s получено неизвестное состояние %(stat)s,пример пропускается" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "В обработчике %(id)s получено неизвестное состояние %(stat)s, пример " "пропускается" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "В участнике %(id)s получено неизвестное состояние %(stat)s, пример " "пропускается" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "В пуле %(id)s получено неизвестное состояние %(stat)s,пример пропускается" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "В vip %(id)s получено неизвестное состояние %(stat)s,пример пропускается" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "В VPN %(id)s получено неизвестное состояние %(stat)s, пример пропускается" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "Виртуальная машина %s не найдена в VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Не найдена виртуальная машина %s в XenServer" msgid "Wrong sensor type" msgstr "Неверный тип датчика" msgid "XenAPI not installed" msgstr "XenAPI не установлен" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Ошибка YAML при чтении файла определений %(file)s" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "не удалось получить процессорное время для %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "Параметр direct не может быть равен true, если включен Gnocchi." #, python-format msgid "dropping out of time order sample: %s" msgstr "удаление образца, выпадающего из хронологического порядка: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "удаление образца без предшественника: %s" msgid "ipmitool output length mismatch" msgstr "несоответствие длины вывода ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes и backup_count должны быть числами." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "недопустимая подпись сообщения, удаление сообщения: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "сбой анализа данных датчика IPMI, не получены данные из переданного ввода" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "сбой анализа данных датчика IPMI, неизвестный тип датчика" msgid "running ipmitool failure" msgstr "сбой выполнения ipmitool" ceilometer-6.1.5/ceilometer/locale/de/0000775000567000056710000000000013072745164021004 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/de/LC_MESSAGES/0000775000567000056710000000000013072745164022571 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001215013072744706027167 0ustar jenkinsjenkins00000000000000# Monika Wolf , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-03 03:37+0000\n" "Last-Translator: Monika Wolf \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "Inspector %(name)s kann nicht geladen werden: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "" "Die Verwendung des residenten Speichers für %(id)s konnte nicht abgerufen " "werden: %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "" "Dispatcher konnte %s nicht verarbeiten. Erneut in Warteschlange stellen." msgid "Error connecting to coordination backend." msgstr "Fehler beim Herstellen einer Verbindung zum Koordinierungs-Back-End." msgid "Error getting group membership info from coordination backend." msgstr "" "Fehler beim Abrufen von Mitgliedschaftsinformationen vom Koordinierungs-Back-" "End." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "" "Fehler beim Beitreten zur Partitionierungsgruppe %s. Operation wird " "wiederholt." #, python-format msgid "Error loading meter definition : %(err)s" msgstr "Fehler beim Laden der Messdefinition : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "Fehler beim Verarbeiten des Ereignisses und es wird gelöscht: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "" "Fehler beim Senden eines Überwachungssignals an das Koordinierungs-Back-End." msgid "Fail to process a notification" msgstr "Eine Benachrichtigung konnte nicht verarbeitet werden." msgid "Fail to process notification" msgstr "Benachrichtigung konnte nicht verarbeitet werden." msgid "Failed to connect to Gnocchi." msgstr "Fehler beim Herstellen einer Verbindung zu Gnocchi." #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "Fehler beim Herstellen einer Verbindung zum Kafka-Service: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "" "Fehler beim Herstellen einer Verbindung zur Datenbank. 
Zweck: %(purpose)s " "Später erneut versuchen: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "" "Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " "Später erneut versuchen: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "Fehler beim Laden der Ressource aufgrund des folgenden Fehlers %s" #, python-format msgid "Failed to record event: %s" msgstr "Das Ereignis konnte nicht aufgezeichnet werden: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "Messdaten wurden nicht aufgezeichnet: %s" #, fuzzy msgid "Failed to retry to send sample data with max_retry times" msgstr "" "Fehler bei dem Wiederholungsversuch, Beispieldaten mit max_retry times zu " "senden." #, fuzzy msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "Gruppen-ID: %{group_id}s, Mitglieder: %{members}s, Ich: %{me}s: Der aktuelle " "Agent ist nicht Teil der Gruppe und kann keine Tasks übernehmen." #, python-format msgid "Invalid type %s specified" msgstr "Ungültigen Typ %s angegeben" #, python-format msgid "Missing field %s" msgstr "Fehlendes Feld %s" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "" "Das übergebene Ressourcenwörterverzeichnis muss die Schlüssel für " "resource_id und resource_url enthalten." #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Erforderliches Feld %(field)s muss %(type)s sein." #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben." #, python-format msgid "Required fields %s not specified" msgstr "Erforderliche Felder %s nicht angegeben." #, python-format msgid "Skip invalid resource %s" msgstr "Ungültige Ressource %s überspringen" #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "%(name)s wird übersprungen, Keystone-Problem: %(exc)s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "Statuscode: %{code}s. Fehler beim Versenden des Ereignisses: %{event}s" #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "Die geänderte Ereignispipeline konnte nicht geladen werden: %s" #, python-format msgid "Unable to load changed pipeline: %s" msgstr "Die geänderte Pipeline konnte nicht geladen werden: %s" #, python-format msgid "Unrecognized type value %s" msgstr "Nicht erkannter Typwert %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "Inspector-Aufruf fehlgeschlagen für %(ident)s Host %(host)s: %(err)s" ceilometer-6.1.5/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po0000664000567000056710000001136013072744706027505 0ustar jenkinsjenkins00000000000000# Monika Wolf , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-04 10:08+0000\n" "Last-Translator: Monika Wolf \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, fuzzy msgid "Can't connect to keystone, assuming aodh is disabled and retry later." msgstr "" "Herstellen einer Verbindung zu Keystone nicht möglich. Aodh ist " "möglicherweise inaktiviert. 
Operation wird später wiederholt." #, fuzzy msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" msgstr "" "Herstellen einer Verbindung zu Keystone nicht möglich. Gnocchi ist " "möglicherweise inaktiviert. Operation wird päter wiederholt." msgid "" "Cannot extract tasks because agent failed to join group properly. Rejoining " "group." msgstr "" "Extrahieren der Tasks nicht möglich, da der Agent nicht ordnungsgemäß in die " "Gruppe eingebunden werden konnte. Operation zum Wiedereinbinden in die " "Gruppe wird durchgeführt." #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " "%(exc)s" msgstr "" "Die %(pollster)s-Daten für %(instance_id)s können nicht untersucht werden. " "Behebbare Ursache: %(exc)s" #, python-format msgid "Dropping out of time order sample: %s" msgstr "" "Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" #, python-format msgid "Failed to load any dispatchers for %s" msgstr "Es konnten keine Dispatcher für %s geladen werden." #, python-format msgid "Ignore unrecognized field %s" msgstr "Nicht erkanntes Feld %s ignorieren" #, python-format msgid "Invalid status, skipping IP address %s" msgstr "Ungültiger Status. IP-Adresse %s wird übersprungen." msgid "Negative delta detected, dropping value" msgstr "Negatives Delta erkannt. Wert wird verworfen." #, python-format msgid "No endpoints found for service %s" msgstr "Es wurden keine Endpunkte für den Service %s gefunden." msgid "" "Non-metric meters may be collected. It is highly advisable to disable these " "meters using ceilometer.conf or the pipeline.yaml" msgstr "" "Es werden möglicherweise nicht metrische Daten erfasst. Es wird dringend " "empfohlen, diese Zähler über die Datei ceilometer.conf oder pipeline.yaml zu " "inaktivieren." #, python-format msgid "" "Skipping %(name)s, %(service_type)s service is not registered in keystone" msgstr "" "%(name)s wird übersprungen. Der Service %(service_type)s ist nicht in " "Keystone registriert." #, python-format msgid "Skipping duplicate meter definition %s" msgstr "Doppelte Messdefinition %s wird übersprungen." msgid "" "ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " "aodh endpoint." msgstr "" "Die ceilometer-api wurde mit aktiviertem aodh gestartet. Alarm-URLs werden " "an den aodh-Endpunkt umgeleitet. " msgid "" "ceilometer-api started with gnocchi enabled. The resources/meters/samples " "URLs are disabled." msgstr "" "Die ceilometer-api wurde mit aktiviertem Gnocchi gestartet. Die URLs für " "resources/meters/samples sind inaktiviert." #, python-format msgid "event signature invalid, discarding event: %s" msgstr "Ereignissignatur ungültig. Ereignis wird verworfen: %s" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "Nachrichtensignatur ungültig, Nachricht wird verworfen: %r" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " "volume (volume: None), the sample will be dropped" msgstr "" "Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " "enthält keinen Datenträger (volume: None). Die Stichprobe wird gelöscht." 
#, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " "volume which is not a number (volume: %(counter_volume)s), the sample will " "be dropped" msgstr "" "Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " "enthält einen Datenträger ohne Zahl (volume: %(counter_volume)s). Die " "Stichprobe wird gelöscht." msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug kann nicht aktiviert werden, wenn Worker > 1 ist. Der Wert wird " "mit False überschrieben." #, python-format msgid "unable to configure oslo_cache: %s" msgstr "Konfigurieren von oslo_cache nicht möglich: %s" ceilometer-6.1.5/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001033313072744706026772 0ustar jenkinsjenkins00000000000000# Frank Kloeker , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-04 05:06+0000\n" "Last-Translator: Monika Wolf \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%d events are removed from database" msgstr "%d Ereignisse aus Datenbank entfernt" #, python-format msgid "%d samples removed from database" msgstr "%d Beispiele aus Datenbank entfernt" msgid "Configuration:" msgstr "Konfiguration:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Verbindung mit %(db)s auf %(nodelist)s wird hergestellt" msgid "Coordination backend started successfully." msgstr "Das Koordinierungs-Back-End wurde erfolgreich gestartet." #, python-format msgid "Definitions: %s" msgstr "Definitionen: %s" msgid "Detected change in pipeline configuration." msgstr "Es wurde eine Änderung in der Pipelinekonfiguration festgestellt." #, python-format msgid "Dropping event data with TTL %d" msgstr "Löschen von Ereignisdaten mit TTL %d" #, python-format msgid "Dropping metering data with TTL %d" msgstr "Löschen von Messdaten mit TTL %d" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "Doppeltes Ereignis erkannt. Wird übersprungen: %s" msgid "Expired residual resource and meter definition data" msgstr "Abgelaufene Daten für residente Ressource und für Messdefinition" #, python-format msgid "Index %s will be recreate." msgstr "Index %s wird erneut erstellt. " #, python-format msgid "Joined partitioning group %s" msgstr "Partitionierungsgruppe %s beigetreten." #, python-format msgid "Left partitioning group %s" msgstr "Partitionierungsgruppe %s verlassen." #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "" "Es wurde kein Grenzwert angegeben. Der Ergebnissatz wird auf %(limit)d " "beschränkt." msgid "Nothing to clean, database event time to live is disabled" msgstr "" "Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankereignisdaten ist " "deaktiviert." msgid "Nothing to clean, database metering time to live is disabled" msgstr "" "Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankstichprobendaten " "ist deaktiviert." 
#, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "Pipeline %(pipeline)s: Konfiguration von Transformerinstanz %(name)s mit " "Parameter %(param)s" #, python-format msgid "Pipeline config: %s" msgstr "Pipelinekonfiguration: %s" msgid "Pipeline configuration file has been updated." msgstr "Die Pipelinekonfigurationsdatei wurde aktualisiert." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "Abfrage von Pollster %(poll)s im Kontext von %(src)s" #, python-format msgid "Publishing policy set to %s" msgstr "Veröffentlichungsrichtlinie auf %s gesetzt" msgid "Reconfiguring polling tasks." msgstr "Polling-Tasks werden neu konfiguriert." msgid "Reloading notification agent and listeners." msgstr "Benachrichtigungsagent und Listener werden erneut geladen." #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "" "Pollster %(name)s überspringen, keine %(p_context)sressourcen in diesem " "Zyklus gefunden." #, python-format msgid "Starting server in PID %s" msgstr "Starten von Server in PID %s" msgid "detected decoupled pipeline config format" msgstr "entkoppeltes Pipeline-Konfigurationsformat erkannt" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "" "Messung von Daten %(counter_name)s für %(resource_id)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "" "Bereitstellung auf 0.0.0.0:%(sport)s, Ansicht unter http://127.0.0.1:" "%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "Bereitstellung auf http://%(host)s:%(port)s" ceilometer-6.1.5/ceilometer/locale/de/LC_MESSAGES/ceilometer.po0000664000567000056710000004075313072744706025273 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Carsten Duch , 2014 # Christian Berendt , 2014 # Ettore Atalan , 2014 # Frank Kloeker , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-04 05:03+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s nicht gefunden" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' " "verwenden" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Tabelle %(table_name)s kann nicht erstellt werden, da sie bereits vorhanden " "ist. 
Fehler wird ignoriert" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Fortfahren nach Fehler von %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "" "Es konnte keine Verbindung zum untergeordneten Host hergestellt werden: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Abruf von CPU-Auslastung nicht möglich für %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Abruf von Speicherbelegung nicht möglich für %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "CPU-Auslastung für VM %s konnte nicht abgerufen werden" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "IP-Adresse von Instanz %s konnte nicht abgerufen werden" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "Dispatcher-Ziel nicht definiert, es werden keine Messgrößen übergeben. Das " "Ziel in der Datei 'ceilometer.conf' definieren." #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt während Suche nach Instanz : " "[Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Fehler bei Auswertung der HTTP-Antwort %s" msgid "Error stopping pollster." msgstr "Fehler beim Stoppen des Pollster." msgid "Event" msgstr "Ereignis" msgid "Expression evaluated to a NaN value!" msgstr "Ausdruck ergab einen NaN-Wert!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Fehler beim Importieren der Erweiterung für %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Fehler beim Überprüfen von Daten der Instanz , " "Domänenstatus ist ABGESCHALTET." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Fehler beim Überprüfen der Speicherbelegung von %(instance_uuid)s, " "Informationen können nicht von libvirt abgerufen werden: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Fehler beim Überprüfen der Speicherbelegung von Instanz , Informationen können nicht von libvirt abgerufen werden." 
#, python-format msgid "Failed to load any notification handlers for %s" msgstr "Es konnten keine Benachrichtigungshandler für %s geladen werden" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Zeitmarkenwert %s konnte nicht analysiert werden" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange " "einreihen" #, python-format msgid "Failed to record metering data: %s" msgstr "Messdaten wurden nicht aufgezeichnet: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Filterausdruck nicht gültig: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Instanz %(name)s (%(instance_id)s) wird ignoriert: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Instanz %(name)s wird ignoriert: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Loadbalancer %(loadbalancer_id)s wird ignoriert." #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Pool %(pool_id)s wird ignoriert." #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Ungültige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, " "Spalte: %(column)s." #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Ungültiger Zeitraum %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Ungültiger Traittyp '%(type)s' für Trait %(trait)s" msgid "Limit must be positive" msgstr "Grenzwert muss positiv sein" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Mehr als ein Ereignis mit der ID %s vom Speichertreiber zurückgegeben" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Mehrere VMs %s in XenServer gefunden" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Angabe von connection_url und connection_password für die Verwendung " "erforderlich" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Kein Plug-in mit dem Namen %(plugin)s verfügbar für %(name)s." msgid "Node Manager init failed" msgstr "Initialisierung von Knoten-Manager fehlgeschlagen" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylight-API hat Folgendes zurückgegeben: %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail-API hat Folgendes zurückgegeben: %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Operator %(operator)s wird nicht unterstützt. Für das Feld %(field)s ist " "nur der Gleichheitsoperator verfügbar." #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Operator %(operator)s wird nicht unterstützt. 
Unterstützte Operatoren: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s" msgid "Period must be positive." msgstr "Zeitraum muss positiv sein." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s nach Fehler von Publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Fortsetzen nach Fehler von Publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "Pipeline %(pipeline)s: Fehler bei Flushoperation für Transformer %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Beendigung nach Fehler von Transformer %(trans)s für " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben." #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster für %s ist inaktiviert!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Verhindern Sie, dass Pollster %(name)s Quelle %(source)s weiterhin abfragt!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Maximale Länge von local_queue für Publisher ist überschritten, die %d " "ältesten Beispiele werden gelöscht" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung " "setzen" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "Anforderung konnte keine Verbindung mit OpenDaylight über NorthBound REST-" "API herstellen" #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben" msgid "Resource" msgstr "Resource" msgid "Sample" msgstr "Beispiel" msgid "Samples should be included in request body" msgstr "Beispiele sollten in Anforderungshauptteil enthalten sein" #, python-format msgid "Skip loading extension for %s" msgstr "Laden der Ausnahme für %s überspringen" #, python-format msgid "String %s is not a valid isotime" msgstr "Zeichenfolge %s ist kein gültiger Wert für 'isotime'" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Die YAML-Datei mit der Definition der Zuordnung zwischen Beispielen und " "gnocchi-Ressourcen/Metriken" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Der Datentyp %(type)s wird nicht unterstützt. 
Die Liste der unterstützten " "Datentypen lautet: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Das Feld 'fields' ist erforderlich für %s" msgid "The path for the file publisher is required" msgstr "Der Pfad für den Datei-Publisher ist erforderlich" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: Von %s gesendete Daten konnten nicht dekodiert werden" msgid "UDP: Unable to store meter" msgstr "UDP: Messgröße kann nicht gespeichert werden" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "" "Es kann keine Verbindung zum Datenbankserver hergestellt werden: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt " "werden." #, python-format msgid "Unable to discover resources: %s" msgstr "Ressourcen können nicht gefunden werden: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Auswertung nicht möglich für Ausdruck %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Publisher %s kann nicht geladen werden" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Hypervisorinspector %s kann nicht geladen werden" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Es kann keine erneute Verbindung zur primären mongodb nach %(retries)d " "Versuchen hergestellt werden. Abbruch." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Es kann keine erneute Verbindung zur primären mongodb hergestellt werden: " "%(errmsg)s. Erneuter Versuch in %(retry_interval)d Sekunden." msgid "Unable to send sample over UDP" msgstr "Beispiel kann nicht über UDP gesendet werden" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " "Datentyp %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Unbekannte Erkennungserweiterung: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Unbekannter Metadatentyp. Schlüssel (%s) wird nicht abfragbar sein." 
#, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Loadbalancer %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Listener %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Mitglied %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Pool %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für VIP %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s in VMware vSphere nicht gefunden" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s in XenServer nicht gefunden" msgid "Wrong sensor type" msgstr "Falscher Sensortyp" msgid "XenAPI not installed" msgstr "XenAPI nicht installiert" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Alarm-URLs sind nicht verfügbar, wenn Aodh inaktiviert oder nicht verfügbar " "ist." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "Abruf von CPU-Zeit nicht möglich für %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "Wenn Gnocci aktiviert ist, kann die Option 'direct' nicht den Wert 'true' " "haben. " #, python-format msgid "dropping out of time order sample: %s" msgstr "" "Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" msgid "ipmitool output length mismatch" msgstr "Abweichung bei ipmitool-Ausgabelänge" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes und backup_count sollten Zahlen sein." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "Nachrichtensignatur ungültig, Nachricht wird verworfen: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " "Eingabe abgerufen" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" msgid "running ipmitool failure" msgstr "Fehler beim Ausführen von ipmitool" ceilometer-6.1.5/ceilometer/locale/es/0000775000567000056710000000000013072745164021023 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/es/LC_MESSAGES/0000775000567000056710000000000013072745164022610 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001114713072744706027213 0ustar jenkinsjenkins00000000000000# Eugènia Torrella , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 11:52+0000\n" "Last-Translator: Eugènia Torrella \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "No se ha podido cargar el inspector %(name)s: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "No se ha podido obtener el uso de memoria residente para %(id)s: %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "El asignador no ha podido manejar el %s, vuelva a ponerlo en la cola." msgid "Error connecting to coordination backend." msgstr "Error de conexión con el servidor coordinador." msgid "Error getting group membership info from coordination backend." msgstr "" "Error al obtener información de pertenencia a grupos del servidor " "coordinador." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "Error al unirse al grupo de partición %s, se está reintentando" #, python-format msgid "Error loading meter definition : %(err)s" msgstr "Error al cargar la definición de medidor : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "Se ha producido un error al procesar el suceso y se descartará: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "Error al enviar una señal de latido al servidor coordinador." msgid "Fail to process a notification" msgstr "Error al procesar una notificación" msgid "Fail to process notification" msgstr "No se ha podido procesar la notificación" msgid "Failed to connect to Gnocchi." msgstr "No se ha podido conectar con Gnocchi." #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "No se ha podido conectar con el servicio Kafka: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "" "No se ha podido establecer conexión con la base de datos con el propósito " "%(purpose)s. Vuelva a intentarlo más tarde: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "" "No se ha podido establecer conexión con la base de datos con el propósito " "%(purpose)s. 
Vuelva a intentarlo más tarde: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "No se ha podido cargar el recurso debido a un error: %s" #, python-format msgid "Failed to record event: %s" msgstr "No se ha podido registrar el suceso: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "No se han podido registrar los datos de medición: %s" msgid "Failed to retry to send sample data with max_retry times" msgstr "" "No se ha podido volver a intentar enviar datos de ejemplo max_retry veces" msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "ID de grupo: %{group_id}s, Miembros: %{members}s, Yo: %{me}s: El agente " "actual no forma parte del grupo y no puede coger tareas" #, python-format msgid "Invalid type %s specified" msgstr "Se ha especificado un tipo no válido: %s" #, python-format msgid "Missing field %s" msgstr "Falta el campo %s" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "" "El diccionario de recursos que se pase debe contener las claves resource_id y " "resource_url." #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "El campo obligatorio %(field)s debería ser un %(type)s" #, python-format msgid "Required field %s not specified" msgstr "No se ha especificado el campo obligatorio %s" #, python-format msgid "Required fields %s not specified" msgstr "No se han especificado los campos obligatorios %s" #, python-format msgid "Skip invalid resource %s" msgstr "Omitir el recurso no válido %s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "" "Código de estado: %{code}s. No se ha podido asignar el suceso: %{event}s" #, python-format msgid "Unrecognized type value %s" msgstr "Valor de tipo no reconocido %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "Error en la llamada al inspector del host %(ident)s %(host)s: %(err)s" ceilometer-6.1.5/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001053413072744706027014 0ustar jenkinsjenkins00000000000000# Eugènia Torrella , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 02:45+0000\n" "Last-Translator: Eugènia Torrella \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%d events are removed from database" msgstr "Se han eliminado %d sucesos de la base de datos" #, python-format msgid "%d samples removed from database" msgstr "Se han eliminado %d ejemplos de la base de datos" msgid "Configuration:" msgstr "Configuración:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Se está estableciendo conexión con %(db)s en %(nodelist)s" msgid "Coordination backend started successfully." msgstr "El servidor coordinador se ha iniciado satisfactoriamente." #, python-format msgid "Definitions: %s" msgstr "Definiciones: %s" msgid "Detected change in pipeline configuration." msgstr "Se ha detectado un cambio en la configuración de la interconexión." 
#, python-format msgid "Dropping event data with TTL %d" msgstr "Descartando datos de sucesos con TTL %d" #, python-format msgid "Dropping metering data with TTL %d" msgstr "Descartando datos de calibración con TTL %d" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "Se ha detectado un suceso duplicado, se omitirá: %s" msgid "Expired residual resource and meter definition data" msgstr "El recurso residual y los datos de definición del medidor han caducado" #, python-format msgid "Index %s will be recreate." msgstr "Se volverá a crear el índice %s." #, python-format msgid "Joined partitioning group %s" msgstr "Se ha unido al grupo de partición %s" #, python-format msgid "Left partitioning group %s" msgstr "Ha dejado el grupo de partición %s" #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "" "No se ha proporcionado ningún valor límite, el conjunto de resultados estará " "limitado a %(limit)d." msgid "Nothing to clean, database event time to live is disabled" msgstr "" "No hay nada que limpiar, el tiempo de vida de sucesos de base de datos está " "inhabilitado" msgid "Nothing to clean, database metering time to live is disabled" msgstr "" "No hay nada que limpiar, el tiempo de vida de medición de base de datos está " "inhabilitado" #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "Interconexión %(pipeline)s: Configure la instancia de transformador %(name)s " "con el parámetro %(param)s" #, python-format msgid "Pipeline config: %s" msgstr "Configuración de interconexión: %s" msgid "Pipeline configuration file has been updated." msgstr "Se ha actualizado el archivo de configuración de la interconexión." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "Sondeando pollster %(poll)s en el contexto de %(src)s" #, python-format msgid "Publishing policy set to %s" msgstr "Política de publicación establecida en %s" msgid "Reconfiguring polling tasks." msgstr "Reconfigurando las tareas de sondeo." msgid "Reloading notification agent and listeners." msgstr "Recargando la notificación, el agente y los escuchas." #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "" "Omitir pollster %(name)s, ningún recurso de %(p_context)s ha encontrado " "este ciclo" #, python-format msgid "Starting server in PID %s" msgstr "Iniciando servidor en PID %s" msgid "detected decoupled pipeline config format" msgstr "" "se ha detectado un formato de configuración de interconexión desacoplado" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "" "datos de medición %(counter_name)s para %(resource_id)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "sirviendo en 0.0.0.0:%(sport)s, vista en http://127.0.0.1:%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "sirviendo en http://%(host)s:%(port)s" ceilometer-6.1.5/ceilometer/locale/es/LC_MESSAGES/ceilometer.po0000664000567000056710000004114413072744706025305 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Rafael Rivero , 2015 # Marian Tort , 2015. #zanata # Eugènia Torrella , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.1.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-05-19 11:00+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-24 07:03+0000\n" "Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "No se ha encontrado %(entity)s %(id)s" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "El transformador aritmético debe utilizar al menos un medidor en la " "expresión '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "No se puede crear la tabla %(table_name)s, ya existe. Se ignorará el error." #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continuar después de error desde %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "No se ha podido conectar el host esclavo: %s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "No se ha podido conectar con XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "No se ha podido obtener la utilización de CPU para %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "No se ha podido obtener el uso de memoria para %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "No se ha podido obtener la utilización de CPU de la VM %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "No se ha podido obtener la dirección IP de la instancia %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "No se ha definido el destino del asignador, no se enviará ningún medidor. " "Defina el destino en el archivo ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Error de libvirt al buscar la instancia : [Código " "de error %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Error al analizar la respuesta HTTP: %s." msgid "Error stopping pollster." msgstr "Error al detener el sondeador." msgid "Event" msgstr "Suceso" msgid "Expression evaluated to a NaN value!" msgstr "La expresión se ha evaluado en un valor NaN." #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "No se ha podido importar la extensión para %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "No se han podido analizar los datos de la instancia , el estado del dominio es SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "No se ha podido analizar el uso de memoria de %(instance_uuid)s, no se puede " "obtener información de libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "No se ha podido analizar el uso de memoria de la instancia , no se puede obtener información de libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "No se ha podido cargar ningún manejador de notificaciones para %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "No se ha podido analizar el valor de la indicación de fecha y hora %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "No se han podido publicar los puntos de datos %d, se descartarán" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola" #, python-format msgid "Failed to record metering data: %s" msgstr "No se han podido registrar los datos de medición: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expresión de filtro no válida: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Ignorando la instancia %(name)s (%(instance_id)s) : %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Ignorando la instancia %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Se ignorará el equilibrador de carga %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Se ignorará la agrupación %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxis de YAML no válida en archivo de definiciones %(file)s, en la línea: " "%(line)s, columna: %(column)s." #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Período no válido %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s" msgid "Limit must be positive" msgstr "El límite debe ser positivo" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "" "Se ha devuelto más de un suceso con el id %s del controlador de " "almacenamiento" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Se han encontrado varias VM %s en XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Debe especificar los valores de connection_url y connection_password a " "utilizar" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s" msgid "Node Manager init failed" msgstr "El inicio de Gestor de nodos ha fallado" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "No tiene autorización para acceder a %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "La API OpenDaylitght ha devuelto %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "El operador %(operator)s no se admite. En el campo %(field)s solo se puede " "utilizar el operador de igualdad" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "El operador %(operator)s no se admite. 
Los operadores admitidos son: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Expresión de Ordenar por no válida: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Error de análisis en la especificación de JSONPath '%(jsonpath)s' para " "%(name)s: %(err)s" msgid "Period must be positive." msgstr "El período debe ser positivo." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "Interconexión %(pipeline)s: %(status)s tras el error de la aplicación de " "publicación %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "" "Interconexión %(pipeline)s: Continuar tras error de la aplicación de " "publicación %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "Interconexión %(pipeline)s: Error al vaciar el transformador %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Interconexión %(pipeline)s: Salir tras error del transformador %(trans)s " "para %(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "" "Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de " "plug-in para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "El sondeo al sensor %(mtr)s ha fallado %(cnt)s veces." #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "El sondeo %(name)s ha fallado %(cnt)s veces." #, python-format msgid "Pollster for %s is disabled!" msgstr "El sondeador de %s está deshabilitado." #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Impedir que el sondeador %(name)s actúe más en el origen de sondeo %(source)s" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Se ha superado la longitud máxima de aplicación de publicación local_queue, " "se descartarán los ejemplos más antiguos %d" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "No se conoce la política de publicación (%s), forzar al valor predeterminado" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "Ha fallado la solicitud de establecer conexión con OpenDaylight con la API " "REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "No se ha especificado el campo obligatorio %s" msgid "Resource" msgstr "Recurso" msgid "Sample" msgstr "Muestra" msgid "Samples should be included in request body" msgstr "Se deben incluir ejemplos en el cuerpo de la solicitud" #, python-format msgid "Skip loading extension for %s" msgstr "Omitir la carga de la extensión de %s" #, python-format msgid "String %s is not a valid isotime" msgstr "La cadena %s no es una hora ISO válida" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "El archivo Yaml que define la correlación entre los ejemplos y los recursos/" "métricas de gnocchi" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "El tipo de datos %(type)s no es compatible. 
La lista de tipos de datos " "admitidos es: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "El campo 'campos' es obligatorio para %s" msgid "The path for the file publisher is required" msgstr "" "La vía de acceso para la aplicación de publicación de archivos es obligatoria" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: no se pueden descodificar los datos enviados por %s" msgid "UDP: Unable to store meter" msgstr "UDP: no se puede almacenar el medidor" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "No se ha podido conectar con el servidor de base de datos: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "No se ha podido convertir el valor %(value)s al tipo de datos esperado " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "No se pueden descubrir recursos: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "No se puede evaluar la expresión %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "No se puede cargar la aplicación de publicación %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "No se puede cargar el inspector de hipervisor: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "No se ha podido volver a conectar con la mongodb primaria después de " "%(retries)d intentos. Abandonando." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "No se ha podido volver a conectar con la mongodb primaria: %(errmsg)s. Se " "volverá a intentar en %(retry_interval)d segundos." msgid "Unable to send sample over UDP" msgstr "No se ha podido enviar una muestra sobre UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Excepción inesperada al convertir %(value)s al tipo de datos esperado " "%(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Extensión de descubrimiento desconocida: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Tipo de metadatos desconocido. La clave (%s) no se podrá consultar." 
#, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga " "%(id)s, se omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el " "ejemplo" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en la agrupación %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el vip %(id)s, se omitirá " "el ejemplo" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en la vpn %(id)s, se omitirá " "el ejemplo" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "No se ha encontrado la VM %s en VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "No se ha encontrado la VM %s en XenServer" msgid "Wrong sensor type" msgstr "Tipo de sensor incorrecto" msgid "XenAPI not installed" msgstr "XenAPI no está instalado" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Error de YAML al leer el archivo de definiciones %(file)s" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Cuando Aodh está deshabilitado o no disponible, los URL de las alarmas no " "están disponibles." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "no se ha podido obtener el tiempo de CPU para %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "la opción directo no puede estar definida como true cuando Gnocchi esté " "habilitado." #, python-format msgid "dropping out of time order sample: %s" msgstr "descartando el ejemplo de orden de plazo tiempo excedido: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "eliminando la muestra sin predecesor: %s" msgid "ipmitool output length mismatch" msgstr "no coincidencia en la longitud de salida de ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes y backup_count deben ser números." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "firma de mensaje no válida, descartando el mensaje: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "ha fallado el análisis de datos de sensor IPMI, no se ha recuperado ningún " "dato de la entrada indicada" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "ha fallado el análisis de datos de sensor IPMI, tipo de sensor desconocido" msgid "running ipmitool failure" msgstr "fallo de ejecución de ipmitool" ceilometer-6.1.5/ceilometer/__init__.py0000664000567000056710000000146113072744703021266 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): # FIXME(jd) This is used by WSME to return a correct HTTP code. We should # not expose it here but wrap our methods in the API to convert it to a # proper HTTP error. code = 501 ceilometer-6.1.5/CONTRIBUTING.rst0000664000567000056710000000106313072744703017464 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/ceilometer ceilometer-6.1.5/setup.py0000664000567000056710000000200413072744703016531 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) ceilometer-6.1.5/etc/0000775000567000056710000000000013072745164015600 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/etc/ceilometer/0000775000567000056710000000000013072745164017730 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/etc/ceilometer/event_pipeline.yaml0000664000567000056710000000032413072744706023622 0ustar jenkinsjenkins00000000000000--- sources: - name: event_source events: - "*" sinks: - event_sink sinks: - name: event_sink transformers: triggers: publishers: - notifier:// ceilometer-6.1.5/etc/ceilometer/ceilometer-config-generator.conf0000664000567000056710000000050313072744706026155 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/ceilometer/ceilometer.conf wrap_width = 79 namespace = ceilometer namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy namespace = oslo.service.service namespace = keystonemiddleware.auth_token ceilometer-6.1.5/etc/ceilometer/pipeline.yaml0000664000567000056710000000500313072744706022420 0ustar jenkinsjenkins00000000000000--- sources: - name: meter_source interval: 600 meters: - "*" sinks: - meter_sink - name: cpu_source interval: 600 meters: - "cpu" sinks: - cpu_sink - cpu_delta_sink - name: disk_source interval: 600 meters: - "disk.read.bytes" - "disk.read.requests" - "disk.write.bytes" - "disk.write.requests" - "disk.device.read.bytes" - "disk.device.read.requests" - "disk.device.write.bytes" - "disk.device.write.requests" sinks: - disk_sink - name: network_source interval: 600 meters: - "network.incoming.bytes" - "network.incoming.packets" - "network.outgoing.bytes" - "network.outgoing.packets" sinks: - network_sink sinks: - name: meter_sink transformers: publishers: - notifier:// - name: cpu_sink transformers: - name: "rate_of_change" parameters: target: name: "cpu_util" unit: "%" type: "gauge" scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" publishers: - notifier:// - name: cpu_delta_sink transformers: - name: "delta" parameters: target: name: "cpu.delta" growth_only: True publishers: - notifier:// - name: disk_sink transformers: - name: "rate_of_change" parameters: source: map_from: name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)" unit: "(B|request)" target: map_to: name: "\\1.\\2.\\3.rate" unit: "\\1/s" type: "gauge" publishers: - notifier:// - name: network_sink transformers: - name: "rate_of_change" parameters: source: map_from: name: "network\\.(incoming|outgoing)\\.(bytes|packets)" unit: "(B|packet)" target: map_to: name: "network.\\1.\\2.rate" unit: "\\1/s" type: "gauge" publishers: - notifier:// ceilometer-6.1.5/etc/ceilometer/api_paste.ini0000664000567000056710000000130413072744706022375 0ustar jenkinsjenkins00000000000000# Ceilometer API WSGI Pipeline # Define the filters that make up the pipeline for processing WSGI requests # Note: This pipeline is PasteDeploy's term rather than Ceilometer's pipeline # used for processing samples # Remove authtoken from the pipeline if you don't want to use keystone authentication [pipeline:main] pipeline = cors request_id authtoken api-server [app:api-server] paste.app_factory = ceilometer.api.app:app_factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:request_id] paste.filter_factory = 
oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = ceilometer ceilometer-6.1.5/etc/ceilometer/rootwrap.conf0000664000567000056710000000172713072744703022461 0ustar jenkinsjenkins00000000000000# Configuration for ceilometer-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ceilometer-6.1.5/etc/ceilometer/rootwrap.d/0000775000567000056710000000000013072745164022027 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/etc/ceilometer/rootwrap.d/ipmi.filters0000664000567000056710000000036013072744703024354 0ustar jenkinsjenkins00000000000000# ceilometer-rootwrap command filters for IPMI capable nodes # This file should be owned by (and only-writeable by) the root user [Filters] # ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool' ipmitool: CommandFilter, ipmitool, root ceilometer-6.1.5/etc/ceilometer/policy.json0000664000567000056710000000066013072744706022125 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "telemetry:get_samples": "", "telemetry:get_sample": "", "telemetry:query_sample": "", "telemetry:create_samples": "", "telemetry:compute_statistics": "", "telemetry:get_meters": "", "telemetry:get_resource": "", "telemetry:get_resources": "", "telemetry:events:index": "", "telemetry:events:show": "" } ceilometer-6.1.5/etc/ceilometer/gnocchi_resources.yaml0000664000567000056710000001370513072744706024327 0ustar jenkinsjenkins00000000000000--- resources: - resource_type: identity archive_policy: low metrics: - 'identity.authenticate.success' - 'identity.authenticate.pending' - 'identity.authenticate.failure' - 'identity.user.created' - 'identity.user.deleted' - 'identity.user.updated' - 'identity.group.created' - 'identity.group.deleted' - 'identity.group.updated' - 'identity.role.created' - 'identity.role.deleted' - 'identity.role.updated' - 'identity.project.created' - 'identity.project.deleted' - 'identity.project.updated' - 'identity.trust.created' - 'identity.trust.deleted' - 'identity.role_assignment.created' - 'identity.role_assignment.deleted' - resource_type: ceph_account metrics: - 'radosgw.objects' - 'radosgw.objects.size' - 'radosgw.objects.containers' - 'radosgw.api.request' - 'radosgw.containers.objects' - 'radosgw.containers.objects.size' - resource_type: instance metrics: - 'instance' - 'memory' - 'memory.usage' - 'memory.resident' - 'vcpus' - 'cpu' - 'cpu.delta' - 'cpu_util' - 'disk.root.size' - 'disk.ephemeral.size' - 'disk.read.requests' - 'disk.read.requests.rate' - 'disk.write.requests' - 'disk.write.requests.rate' - 'disk.read.bytes' - 
'disk.read.bytes.rate' - 'disk.write.bytes' - 'disk.write.bytes.rate' - 'disk.latency' - 'disk.iops' - 'disk.capacity' - 'disk.allocation' - 'disk.usage' attributes: host: resource_metadata.host image_ref: resource_metadata.image_ref display_name: resource_metadata.display_name flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)) server_group: resource_metadata.user_metadata.server_group - resource_type: instance_network_interface metrics: - 'network.outgoing.packets.rate' - 'network.incoming.packets.rate' - 'network.outgoing.packets' - 'network.incoming.packets' - 'network.outgoing.bytes.rate' - 'network.incoming.bytes.rate' - 'network.outgoing.bytes' - 'network.incoming.bytes' attributes: name: resource_metadata.vnic_name instance_id: resource_metadata.instance_id - resource_type: instance_disk metrics: - 'disk.device.read.requests' - 'disk.device.read.requests.rate' - 'disk.device.write.requests' - 'disk.device.write.requests.rate' - 'disk.device.read.bytes' - 'disk.device.read.bytes.rate' - 'disk.device.write.bytes' - 'disk.device.write.bytes.rate' - 'disk.device.latency' - 'disk.device.iops' - 'disk.device.capacity' - 'disk.device.allocation' - 'disk.device.usage' attributes: name: resource_metadata.disk_name instance_id: resource_metadata.instance_id - resource_type: image metrics: - 'image' - 'image.size' - 'image.download' - 'image.serve' attributes: name: resource_metadata.name container_format: resource_metadata.container_format disk_format: resource_metadata.disk_format - resource_type: ipmi metrics: - 'hardware.ipmi.node.power' - 'hardware.ipmi.node.temperature' - 'hardware.ipmi.node.inlet_temperature' - 'hardware.ipmi.node.outlet_temperature' - 'hardware.ipmi.node.fan' - 'hardware.ipmi.node.current' - 'hardware.ipmi.node.voltage' - 'hardware.ipmi.node.airflow' - 'hardware.ipmi.node.cups' - 'hardware.ipmi.node.cpu_util' - 'hardware.ipmi.node.mem_util' - 'hardware.ipmi.node.io_util' - resource_type: network metrics: - 'bandwidth' - 'network' - 'network.create' - 'network.update' - 'subnet' - 'subnet.create' - 'subnet.update' - 'port' - 'port.create' - 'port.update' - 'router' - 'router.create' - 'router.update' - 'ip.floating' - 'ip.floating.create' - 'ip.floating.update' - resource_type: stack metrics: - 'stack.create' - 'stack.update' - 'stack.delete' - 'stack.resume' - 'stack.suspend' - resource_type: swift_account metrics: - 'storage.objects.incoming.bytes' - 'storage.objects.outgoing.bytes' - 'storage.api.request' - 'storage.objects.size' - 'storage.objects' - 'storage.objects.containers' - 'storage.containers.objects' - 'storage.containers.objects.size' - resource_type: volume metrics: - 'volume' - 'volume.size' - 'volume.create' - 'volume.delete' - 'volume.update' - 'volume.resize' - 'volume.attach' - 'volume.detach' attributes: display_name: resource_metadata.display_name - resource_type: host metrics: - 'hardware.cpu.load.1min' - 'hardware.cpu.load.5min' - 'hardware.cpu.load.15min' - 'hardware.cpu.util' - 'hardware.memory.total' - 'hardware.memory.used' - 'hardware.memory.swap.total' - 'hardware.memory.swap.avail' - 'hardware.memory.buffer' - 'hardware.memory.cached' - 'hardware.network.ip.outgoing.datagrams' - 'hardware.network.ip.incoming.datagrams' - 'hardware.system_stats.cpu.idle' - 'hardware.system_stats.io.outgoing.blocks' - 'hardware.system_stats.io.incoming.blocks' attributes: host_name: resource_metadata.resource_url - resource_type: host_disk metrics: - 'hardware.disk.size.total' - 'hardware.disk.size.used' attributes: host_name: 
resource_metadata.resource_url device_name: resource_metadata.device - resource_type: host_network_interface metrics: - 'hardware.network.incoming.bytes' - 'hardware.network.outgoing.bytes' - 'hardware.network.outgoing.errors' attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.name ceilometer-6.1.5/etc/ceilometer/README-ceilometer.conf.txt0000664000567000056710000000020013072744706024471 0ustar jenkinsjenkins00000000000000To generate the sample ceilometer.conf file, run the following command from the top-level ceilometer directory: tox -egenconfigceilometer-6.1.5/etc/ceilometer/examples/0000775000567000056710000000000013072745164021546 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml0000664000567000056710000002137513072744703031245 0ustar jenkinsjenkins00000000000000metric: # LBaaS V2 - name: "loadbalancer.create" event_type: - "loadbalancer.create.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.update" event_type: - "loadbalancer.update.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.delete" event_type: - "loadbalancer.delete.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "listener.create" event_type: - "listener.create.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "listener.update" event_type: - "listener.update.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: 
"listener.delete" event_type: - "listener.delete.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "healthmonitor.create" event_type: - "healthmonitor.create.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.update" event_type: - "healthmonitor.update.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.delete" event_type: - "healthmonitor.delete.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "pool.create" event_type: - "pool.create.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.update" event_type: - "pool.update.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.delete" event_type: - "pool.delete.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: 
$.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "member.create" event_type: - "member.create.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.update" event_type: - "member.update.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.delete" event_type: - "member.delete.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id ceilometer-6.1.5/etc/ceilometer/examples/osprofiler_event_definitions.yaml0000664000567000056710000000130213072744703030404 0ustar jenkinsjenkins00000000000000--- - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params ceilometer-6.1.5/etc/ceilometer/event_definitions.yaml0000664000567000056710000003550513072744706024341 0ustar jenkinsjenkins00000000000000--- - event_type: compute.instance.* traits: &instance_traits tenant_id: fields: payload.tenant_id user_id: fields: payload.user_id instance_id: fields: payload.instance_id host: fields: publisher_id.`split(., 1, 1)` service: fields: publisher_id.`split(., 0, -1)` memory_mb: type: int fields: payload.memory_mb disk_gb: type: int fields: payload.disk_gb root_gb: type: int fields: payload.root_gb ephemeral_gb: type: int fields: payload.ephemeral_gb vcpus: type: int fields: payload.vcpus instance_type_id: type: int fields: payload.instance_type_id instance_type: fields: payload.instance_type state: fields: payload.state os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' os_version: fields: payload.image_meta.'org.openstack__1__os_version' os_distro: fields: payload.image_meta.'org.openstack__1__os_distro' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: compute.instance.exists traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: ['volume.exists', 
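A note on the host and service traits defined just above for compute.instance.*: the backquoted expressions are jsonpath-rw-ext string extensions, where split takes (delimiter, segment index, maxsplit). The plain-Python lines below only illustrate the intended result and are not the code ceilometer runs; the sample publisher_id is invented.

# Illustration only; real evaluation happens inside jsonpath-rw-ext.
publisher_id = 'compute.host-0042'        # invented example value

# fields: publisher_id.`split(., 1, 1)`  -> segment 1 after one split
host = publisher_id.split('.', 1)[1]      # 'host-0042'

# fields: publisher_id.`split(., 0, -1)` -> segment 0, unlimited splits
service = publisher_id.split('.')[0]      # 'compute'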
'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: &cinder_traits user_id: fields: payload.user_id project_id: fields: payload.tenant_id availability_zone: fields: payload.availability_zone display_name: fields: payload.display_name replication_status: fields: payload.replication_status status: fields: payload.status created_at: fields: payload.created_at - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.volume_id host: fields: payload.host size: fields: payload.size type: fields: payload.volume_type replication_status: fields: payload.replication_status - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id volume_id: fields: payload.volume_id - event_type: ['image_volume_cache.*'] traits: image_id: fields: payload.image_id host: fields: payload.host - event_type: ['image.update', 'image.upload', 'image.delete'] traits: &glance_crud project_id: fields: payload.owner resource_id: fields: payload.id name: fields: payload.name status: fields: payload.status created_at: fields: payload.created_at user_id: fields: payload.owner deleted_at: fields: payload.deleted_at size: fields: payload.size - event_type: image.send traits: &glance_send receiver_project: fields: payload.receiver_tenant_id receiver_user: fields: payload.receiver_user_id user_id: fields: payload.owner_id image_id: fields: payload.image_id destination_ip: fields: payload.destination_ip bytes_sent: fields: payload.bytes_sent - event_type: orchestration.stack.* traits: &orchestration_crud project_id: fields: payload.tenant_id user_id: fields: ['_context_trustor_user_id', '_context_user_id'] resource_id: fields: payload.stack_identity - event_type: sahara.cluster.* traits: &sahara_crud project_id: fields: payload.project_id user_id: fields: _context_user_id resource_id: fields: payload.cluster_id - event_type: sahara.cluster.health traits: &sahara_health <<: *sahara_crud verification_id: fields: payload.verification_id health_check_status: fields: payload.health_check_status health_check_name: fields: payload.health_check_name health_check_description: fields: payload.health_check_description created_at: type: datetime fields: payload.created_at updated_at: type: datetime fields: payload.updated_at - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] traits: &identity_crud resource_id: fields: payload.resource_info initiator_id: fields: payload.initiator.id project_id: fields: payload.initiator.project_id domain_id: fields: payload.initiator.domain_id - event_type: identity.role_assignment.* traits: &identity_role_assignment role: fields: payload.role group: fields: payload.group domain: fields: payload.domain user: fields: payload.user project: fields: payload.project - event_type: identity.authenticate traits: &identity_authenticate typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: 
payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_addr: fields: payload.initiator.host.address target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id observer_typeURI: fields: payload.observer.typeURI observer_id: fields: payload.observer.id - event_type: objectstore.http.request traits: &objectstore_request typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_project_id: fields: payload.initiator.project_id target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id target_action: fields: payload.target.action target_metadata_path: fields: payload.target.metadata.path target_metadata_version: fields: payload.target.metadata.version target_metadata_container: fields: payload.target.metadata.container target_metadata_object: fields: payload.target.metadata.object observer_id: fields: payload.observer.id - event_type: magnetodb.table.* traits: &kv_store resource_id: fields: payload.table_uuid user_id: fields: _context_user_id project_id: fields: _context_tenant - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] traits: &network_traits user_id: fields: _context_user_id project_id: fields: _context_tenant_id - event_type: network.* traits: <<: *network_traits resource_id: fields: ['payload.network.id', 'payload.id'] - event_type: subnet.* traits: <<: *network_traits resource_id: fields: ['payload.subnet.id', 'payload.id'] - event_type: port.* traits: <<: *network_traits resource_id: fields: ['payload.port.id', 'payload.id'] - event_type: router.* traits: <<: *network_traits resource_id: fields: ['payload.router.id', 'payload.id'] - event_type: floatingip.* traits: <<: *network_traits resource_id: fields: ['payload.floatingip.id', 'payload.id'] - event_type: pool.* traits: <<: *network_traits resource_id: fields: ['payload.pool.id', 'payload.id'] - event_type: vip.* traits: <<: *network_traits resource_id: fields: ['payload.vip.id', 'payload.id'] - event_type: member.* traits: <<: *network_traits resource_id: fields: ['payload.member.id', 'payload.id'] - event_type: health_monitor.* traits: <<: *network_traits resource_id: fields: ['payload.health_monitor.id', 'payload.id'] - event_type: healthmonitor.* traits: <<: *network_traits resource_id: fields: ['payload.healthmonitor.id', 'payload.id'] - event_type: listener.* traits: <<: *network_traits resource_id: fields: ['payload.listener.id', 'payload.id'] - event_type: loadbalancer.* traits: <<: *network_traits resource_id: fields: ['payload.loadbalancer.id', 'payload.id'] - event_type: firewall.* traits: <<: *network_traits resource_id: fields: ['payload.firewall.id', 'payload.id'] - event_type: firewall_policy.* traits: <<: *network_traits resource_id: fields: ['payload.firewall_policy.id', 'payload.id'] - event_type: firewall_rule.* traits: <<: *network_traits resource_id: fields: ['payload.firewall_rule.id', 'payload.id'] - event_type: vpnservice.* traits: 
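Several resource_id traits in the surrounding sections give fields as a two-element list such as ['payload.network.id', 'payload.id']. The convention this sketch assumes is first match wins: create and update payloads that nest the object under its type resolve through the first path, while delete payloads that carry a bare id fall through to the second, so both end up in the same trait. A self-contained toy version:

def get_path(path, body):
    # Toy dotted-path lookup standing in for a real JSONPath evaluator.
    node = body
    for key in path.split('.'):
        if not isinstance(node, dict) or key not in node:
            return None
        node = node[key]
    return node


def resolve(paths, body):
    # First path that yields a value wins.
    for path in paths:
        value = get_path(path, body)
        if value is not None:
            return value


create_body = {'payload': {'network': {'id': 'net-1'}}}
delete_body = {'payload': {'id': 'net-1'}}
paths = ['payload.network.id', 'payload.id']
assert resolve(paths, create_body) == resolve(paths, delete_body) == 'net-1'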
<<: *network_traits resource_id: fields: ['payload.vpnservice.id', 'payload.id'] - event_type: ipsecpolicy.* traits: <<: *network_traits resource_id: fields: ['payload.ipsecpolicy.id', 'payload.id'] - event_type: ikepolicy.* traits: <<: *network_traits resource_id: fields: ['payload.ikepolicy.id', 'payload.id'] - event_type: ipsec_site_connection.* traits: <<: *network_traits resource_id: fields: ['payload.ipsec_site_connection.id', 'payload.id'] - event_type: '*http.*' traits: &http_audit project_id: fields: payload.initiator.project_id user_id: fields: payload.initiator.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType action: fields: payload.action outcome: fields: payload.outcome id: fields: payload.id eventTime: fields: payload.eventTime requestPath: fields: payload.requestPath observer_id: fields: payload.observer.id target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI target_name: fields: payload.target.name initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_address: fields: payload.initiator.host.address - event_type: '*http.response' traits: <<: *http_audit reason_code: fields: payload.reason.reasonCode - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] traits: &dns_domain_traits status: fields: payload.status retry: fields: payload.retry description: fields: payload.description expire: fields: payload.expire email: fields: payload.email ttl: fields: payload.ttl action: fields: payload.action name: fields: payload.name resource_id: fields: payload.id created_at: fields: payload.created_at updated_at: fields: payload.updated_at version: fields: payload.version parent_domain_id: fields: parent_domain_id serial: fields: payload.serial - event_type: dns.domain.exists traits: <<: *dns_domain_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: trove.* traits: &trove_base_traits state: fields: payload.state_description instance_type: fields: payload.instance_type user_id: fields: payload.user_id resource_id: fields: payload.instance_id instance_type_id: fields: payload.instance_type_id launched_at: type: datetime fields: payload.launched_at instance_name: fields: payload.instance_name state: fields: payload.state nova_instance_id: fields: payload.nova_instance_id service_id: fields: payload.service_id created_at: type: datetime fields: payload.created_at region: fields: payload.region - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] traits: &trove_common_traits name: fields: payload.name availability_zone: fields: payload.availability_zone instance_size: type: int fields: payload.instance_size volume_size: type: int fields: payload.volume_size nova_volume_id: fields: payload.nova_volume_id - event_type: trove.instance.create traits: <<: [*trove_base_traits, *trove_common_traits] - event_type: trove.instance.modify_volume traits: <<: [*trove_base_traits, *trove_common_traits] old_volume_size: type: int fields: payload.old_volume_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.modify_flavor traits: <<: [*trove_base_traits, *trove_common_traits] old_instance_size: type: int fields: payload.old_instance_size modify_at: type: datetime fields: payload.modify_at - event_type: 
trove.instance.delete traits: <<: [*trove_base_traits, *trove_common_traits] deleted_at: type: datetime fields: payload.deleted_at - event_type: trove.instance.exists traits: <<: *trove_base_traits display_name: fields: payload.display_name audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params ceilometer-6.1.5/etc/apache2/0000775000567000056710000000000013072745164017103 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/etc/apache2/ceilometer0000664000567000056710000000264213072744703021160 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # ceilometer API through mod_wsgi. # Note: If you are using a Debian-based system then the paths # "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead # of "httpd". # # The number of processes and threads is an example only and should # be adjusted according to local requirements. Listen 8777 <VirtualHost *:8777> WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP} WSGIProcessGroup ceilometer-api WSGIScriptAlias / /var/www/ceilometer/app WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/httpd/ceilometer_error.log CustomLog /var/log/httpd/ceilometer_access.log combined </VirtualHost> WSGISocketPrefix /var/run/httpd ceilometer-6.1.5/README.rst0000664000567000056710000000044213072744705016514 0ustar jenkinsjenkins00000000000000ceilometer ========== Release notes can be read online at: http://docs.openstack.org/developer/ceilometer/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/ceilometer/ The project home is at: http://launchpad.net/ceilometer ceilometer-6.1.5/run-functional-tests.sh0000775000567000056710000000035513072744706021474 0ustar jenkinsjenkins00000000000000#!/bin/bash -x set -e # Use a mongodb backend by default if [ -z $CEILOMETER_TEST_BACKEND ]; then CEILOMETER_TEST_BACKEND="mongodb" fi for backend in $CEILOMETER_TEST_BACKEND; do overtest $backend ./tools/pretty_tox.sh $* done ceilometer-6.1.5/pylintrc0000664000567000056710000000304213072744706016614 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add <file or directory> to the black list. It should be a base name, not a # path. 
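The naming rules in the [Basic] section below are ordinary Python regular expressions matched from the start of the name; note how method-rgx whitelists setUp and tearDown verbatim alongside the snake_case pattern. A quick illustrative check:

import re

method_rgx = re.compile(r'([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$')

assert method_rgx.match('poll_and_publish')      # snake_case passes
assert method_rgx.match('setUp')                 # whitelisted verbatim
assert not method_rgx.match('PollAndPublish')    # CamelCase is rejected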
ignore=openstack [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. # W0703: Catch "Exception". disable=C0111,W0511,W0142,W0622,W0703 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Type attributes names can be 2 to 31 characters long, with lowercase and underscores attr-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$ # Module names matching sahara-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(sahara-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [TYPECHECK] generated-members=query,node_template,status_code,data ceilometer-6.1.5/setup.cfg0000664000567000056710000004052213072745164016651 0ustar jenkinsjenkins00000000000000[metadata] name = ceilometer summary = OpenStack Telemetry description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/ceilometer/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Topic :: System :: Monitoring [global] setup-hooks = pbr.hooks.setup_hook [files] packages = ceilometer [entry_points] ceilometer.notification = instance = ceilometer.compute.notifications.instance:Instance instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled network = ceilometer.network.notifications:Network subnet = ceilometer.network.notifications:Subnet port = ceilometer.network.notifications:Port router = ceilometer.network.notifications:Router floatingip = ceilometer.network.notifications:FloatingIP http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification network.services.lb.pool = ceilometer.network.notifications:Pool network.services.lb.vip = ceilometer.network.notifications:Vip network.services.lb.member = ceilometer.network.notifications:Member network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor network.services.firewall = ceilometer.network.notifications:Firewall network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy network.services.firewall.rule = ceilometer.network.notifications:FirewallRule network.services.vpn = 
ceilometer.network.notifications:VPNService network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection _sample = ceilometer.telemetry.notifications:TelemetryIpc meter = ceilometer.meter.notifications:ProcessMeterNotifications ceilometer.discover = local_instances = ceilometer.compute.discovery:InstanceDiscovery endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery tenant = ceilometer.agent.discovery.tenant:TenantDiscovery local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery lb_members = ceilometer.network.services.discovery:LBMembersDiscovery lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery ceilometer.poll.compute = disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster cpu = ceilometer.compute.pollsters.cpu:CPUPollster cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.packets = 
ceilometer.compute.pollsters.net:IncomingPacketsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster instance = ceilometer.compute.pollsters.instance:InstancePollster memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster ceilometer.poll.ipmi = hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster ceilometer.poll.central = ip.floating = ceilometer.network.floatingip:FloatingIPPollster image = ceilometer.image.glance:ImagePollster image.size = ceilometer.image.glance:ImageSizePollster rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster rgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster energy = ceilometer.energy.kwapi:EnergyPollster power = ceilometer.energy.kwapi:PowerPollster switch.port = ceilometer.network.statistics.port:PortPollster switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes 
switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount switch.table = ceilometer.network.statistics.table:TablePollster switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets switch = ceilometer.network.statistics.switch:SWPollster switch.flow = ceilometer.network.statistics.flow:FlowPollster switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster ceilometer.builder.poll.central = hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster ceilometer.event.storage = es = ceilometer.event.storage.impl_elasticsearch:Connection log = ceilometer.event.storage.impl_log:Connection mongodb = ceilometer.event.storage.impl_mongodb:Connection mysql = ceilometer.event.storage.impl_sqlalchemy:Connection postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection hbase = ceilometer.event.storage.impl_hbase:Connection db2 = ceilometer.event.storage.impl_db2:Connection ceilometer.metering.storage = log = ceilometer.storage.impl_log:Connection mongodb = ceilometer.storage.impl_mongodb:Connection mysql = 
ceilometer.storage.impl_sqlalchemy:Connection postgresql = ceilometer.storage.impl_sqlalchemy:Connection sqlite = ceilometer.storage.impl_sqlalchemy:Connection hbase = ceilometer.storage.impl_hbase:Connection db2 = ceilometer.storage.impl_db2:Connection ceilometer.compute.virt = libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector ceilometer.hardware.inspectors = snmp = ceilometer.hardware.inspector.snmp:SNMPInspector ceilometer.transformer = accumulator = ceilometer.transformer.accumulator:TransformerAccumulator delta = ceilometer.transformer.conversions:DeltaTransformer unit_conversion = ceilometer.transformer.conversions:ScalingTransformer rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer aggregator = ceilometer.transformer.conversions:AggregatorTransformer arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer ceilometer.publisher = test = ceilometer.publisher.test:TestPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher udp = ceilometer.publisher.udp:UDPPublisher file = ceilometer.publisher.file:FilePublisher direct = ceilometer.publisher.direct:DirectPublisher kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher ceilometer.event.publisher = test = ceilometer.publisher.test:TestPublisher direct = ceilometer.publisher.direct:DirectPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher ceilometer.event.trait_plugin = split = ceilometer.event.trait_plugins:SplitterTraitPlugin bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin console_scripts = ceilometer-api = ceilometer.cmd.api:main ceilometer-polling = ceilometer.cmd.polling:main ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-dbsync = ceilometer.cmd.storage:dbsync ceilometer-expirer = ceilometer.cmd.storage:expirer ceilometer-rootwrap = oslo_rootwrap.cmd:main ceilometer-collector = ceilometer.cmd.collector:main ceilometer.dispatcher.meter = database = ceilometer.dispatcher.database:DatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher ceilometer.dispatcher.event = database = ceilometer.dispatcher.database:DatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher network.statistics.drivers = opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver oslo.config.opts = ceilometer = ceilometer.opts:list_opts oslo.config.opts.defaults = ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults keystoneauth1.plugin = password-ceilometer-legacy = ceilometer.keystone_client:LegacyCeilometerKeystoneLoader tempest.test_plugins = ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [pbr] warnerrors = true autodoc_index_modules = true [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg 
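Every group under [entry_points] above is a setuptools plugin namespace: pollsters, publishers, dispatchers, and storage drivers are looked up by name at runtime rather than imported directly (in the codebase this goes through stevedore managers). The sketch below shows the underlying resolution using a plain pkg_resources lookup instead of ceilometer's actual manager classes, an assumption made for brevity.

import pkg_resources

# Resolve the 'image' pollster registered above under ceilometer.poll.central.
for ep in pkg_resources.iter_entry_points('ceilometer.poll.central',
                                          name='image'):
    pollster_cls = ep.load()  # -> ceilometer.image.glance:ImagePollster
    print(pollster_cls)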
output_file = ceilometer/locale/ceilometer.pot [compile_catalog] directory = ceilometer/locale domain = ceilometer [update_catalog] domain = ceilometer output_dir = ceilometer/locale input_file = ceilometer/locale/ceilometer.pot [egg_info] tag_build = tag_date = 0 ceilometer-6.1.5/MAINTAINERS0000664000567000056710000000071413072744705016524 0ustar jenkinsjenkins00000000000000= Generalist Code Reviewers = The current members of ceilometer-core are listed here: https://launchpad.net/~ceilometer-drivers/+members#active This group can +2 and approve patches in Ceilometer. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. = IRC handles of maintainers = cdent gordc ildikov jd__ liusheng llu _nadya_ pradk rohit_ sileht zqfan ceilometer-6.1.5/rally-jobs/0000775000567000056710000000000013072745164017103 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/rally-jobs/plugins/0000775000567000056710000000000013072745164020564 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/rally-jobs/plugins/README.rst0000664000567000056710000000060613072744703022253 0ustar jenkinsjenkins00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. ceilometer-6.1.5/rally-jobs/plugins/plugin_sample.py0000664000567000056710000000171713072744706024004 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Sample of plugin for Ceilometer. For more Ceilometer related benchmarks take a look here: github.com/openstack/rally/blob/master/rally/benchmark/scenarios/ceilometer/ About plugins: https://rally.readthedocs.org/en/latest/plugins.html Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts """ from rally.benchmark.scenarios import base class CeilometerPlugin(base.Scenario): pass ceilometer-6.1.5/rally-jobs/README.rst0000664000567000056710000000157013072744703020573 0ustar jenkinsjenkins00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
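To give the empty CeilometerPlugin scaffold in plugin_sample.py above some shape: a fleshed-out scenario might look like the sketch below. It assumes the old rally.benchmark plugin API that the sample already imports; the class name, method name, and decorator usage are illustrative, not part of this repository.

from rally.benchmark.scenarios import base


class CeilometerPluginExample(base.Scenario):

    @base.scenario()
    def list_meters_once(self):
        """List all meters once through the ceilometer client."""
        self.clients("ceilometer").meters.list()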
Files will be located in ~/.rally/extra/* * ceilometer is a task that is run in gates against Ceilometer Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins ceilometer-6.1.5/rally-jobs/extra/0000775000567000056710000000000013072745164020226 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/rally-jobs/extra/README.rst0000664000567000056710000000025513072744703021715 0ustar jenkinsjenkins00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. Files will be in ~/.rally/extra/* ceilometer-6.1.5/rally-jobs/extra/fake.img0000664000567000056710000000000013072744703021616 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/rally-jobs/ceilometer.yaml0000664000567000056710000000263613072744703022124 0ustar jenkinsjenkins00000000000000--- CeilometerMeters.list_meters: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerResource.list_resources: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerStats.create_meter_and_get_stats: - args: user_id: "user-id" resource_id: "resource-id" counter_volume: 1.0 counter_unit: "" counter_type: "cumulative" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_samples: - args: filter: {"=": {"counter_unit": "instance"}} orderby: !!null limit: 10 counter_name: "cpu_util" counter_type: "gauge" counter_unit: "instance" counter_volume: "1.0" resource_id: "resource_id" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 ceilometer-6.1.5/LICENSE0000664000567000056710000002363713072744703016043 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ceilometer-6.1.5/ceilometer.egg-info/0000775000567000056710000000000013072745164020647 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/ceilometer.egg-info/not-zip-safe0000664000567000056710000000000113072745142023071 0ustar jenkinsjenkins00000000000000 ceilometer-6.1.5/ceilometer.egg-info/dependency_links.txt0000664000567000056710000000000113072745162024713 0ustar jenkinsjenkins00000000000000 ceilometer-6.1.5/ceilometer.egg-info/SOURCES.txt0000664000567000056710000006510413072745164022541 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg functions.sh pylintrc requirements.txt run-functional-tests.sh setup.cfg setup.py test-requirements.txt tox.ini ceilometer/__init__.py ceilometer/collector.py ceilometer/coordination.py ceilometer/declarative.py ceilometer/exchange_control.py ceilometer/i18n.py ceilometer/keystone_client.py ceilometer/messaging.py ceilometer/middleware.py ceilometer/neutron_client.py ceilometer/notification.py ceilometer/nova_client.py ceilometer/opts.py ceilometer/pipeline.py ceilometer/sample.py ceilometer/service.py ceilometer/service_base.py ceilometer/utils.py ceilometer/version.py ceilometer.egg-info/PKG-INFO ceilometer.egg-info/SOURCES.txt ceilometer.egg-info/dependency_links.txt ceilometer.egg-info/entry_points.txt ceilometer.egg-info/not-zip-safe ceilometer.egg-info/pbr.json ceilometer.egg-info/requires.txt ceilometer.egg-info/top_level.txt ceilometer/agent/__init__.py ceilometer/agent/manager.py ceilometer/agent/plugin_base.py ceilometer/agent/discovery/__init__.py ceilometer/agent/discovery/endpoint.py ceilometer/agent/discovery/localnode.py ceilometer/agent/discovery/tenant.py ceilometer/api/__init__.py ceilometer/api/app.py ceilometer/api/app.wsgi ceilometer/api/hooks.py ceilometer/api/middleware.py ceilometer/api/rbac.py ceilometer/api/controllers/__init__.py ceilometer/api/controllers/root.py ceilometer/api/controllers/v2/__init__.py ceilometer/api/controllers/v2/base.py ceilometer/api/controllers/v2/capabilities.py ceilometer/api/controllers/v2/events.py ceilometer/api/controllers/v2/meters.py ceilometer/api/controllers/v2/query.py 
ceilometer/api/controllers/v2/resources.py ceilometer/api/controllers/v2/root.py ceilometer/api/controllers/v2/samples.py ceilometer/api/controllers/v2/utils.py ceilometer/cmd/__init__.py ceilometer/cmd/agent_notification.py ceilometer/cmd/api.py ceilometer/cmd/collector.py ceilometer/cmd/polling.py ceilometer/cmd/sample.py ceilometer/cmd/storage.py ceilometer/compute/__init__.py ceilometer/compute/discovery.py ceilometer/compute/util.py ceilometer/compute/notifications/__init__.py ceilometer/compute/notifications/instance.py ceilometer/compute/pollsters/__init__.py ceilometer/compute/pollsters/cpu.py ceilometer/compute/pollsters/disk.py ceilometer/compute/pollsters/instance.py ceilometer/compute/pollsters/memory.py ceilometer/compute/pollsters/net.py ceilometer/compute/pollsters/util.py ceilometer/compute/virt/__init__.py ceilometer/compute/virt/inspector.py ceilometer/compute/virt/hyperv/__init__.py ceilometer/compute/virt/hyperv/inspector.py ceilometer/compute/virt/libvirt/__init__.py ceilometer/compute/virt/libvirt/inspector.py ceilometer/compute/virt/vmware/__init__.py ceilometer/compute/virt/vmware/inspector.py ceilometer/compute/virt/vmware/vsphere_operations.py ceilometer/compute/virt/xenapi/__init__.py ceilometer/compute/virt/xenapi/inspector.py ceilometer/conf/__init__.py ceilometer/conf/defaults.py ceilometer/dispatcher/__init__.py ceilometer/dispatcher/database.py ceilometer/dispatcher/file.py ceilometer/dispatcher/gnocchi.py ceilometer/dispatcher/http.py ceilometer/energy/__init__.py ceilometer/energy/kwapi.py ceilometer/event/__init__.py ceilometer/event/converter.py ceilometer/event/endpoint.py ceilometer/event/trait_plugins.py ceilometer/event/storage/__init__.py ceilometer/event/storage/base.py ceilometer/event/storage/impl_db2.py ceilometer/event/storage/impl_elasticsearch.py ceilometer/event/storage/impl_hbase.py ceilometer/event/storage/impl_log.py ceilometer/event/storage/impl_mongodb.py ceilometer/event/storage/impl_sqlalchemy.py ceilometer/event/storage/models.py ceilometer/event/storage/pymongo_base.py ceilometer/hacking/__init__.py ceilometer/hacking/checks.py ceilometer/hardware/__init__.py ceilometer/hardware/discovery.py ceilometer/hardware/inspector/__init__.py ceilometer/hardware/inspector/base.py ceilometer/hardware/inspector/snmp.py ceilometer/hardware/pollsters/__init__.py ceilometer/hardware/pollsters/generic.py ceilometer/hardware/pollsters/util.py ceilometer/hardware/pollsters/data/snmp.yaml ceilometer/image/__init__.py ceilometer/image/glance.py ceilometer/ipmi/__init__.py ceilometer/ipmi/notifications/__init__.py ceilometer/ipmi/notifications/ironic.py ceilometer/ipmi/platform/__init__.py ceilometer/ipmi/platform/exception.py ceilometer/ipmi/platform/intel_node_manager.py ceilometer/ipmi/platform/ipmi_sensor.py ceilometer/ipmi/platform/ipmitool.py ceilometer/ipmi/pollsters/__init__.py ceilometer/ipmi/pollsters/node.py ceilometer/ipmi/pollsters/sensor.py ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po ceilometer/locale/de/LC_MESSAGES/ceilometer.po ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/es/LC_MESSAGES/ceilometer.po ceilometer/locale/fr/LC_MESSAGES/ceilometer.po ceilometer/locale/it/LC_MESSAGES/ceilometer.po ceilometer/locale/ja/LC_MESSAGES/ceilometer.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po 
ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po ceilometer/locale/ru/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-error.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer-log-warning.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po ceilometer/meter/__init__.py ceilometer/meter/notifications.py ceilometer/meter/data/meters.yaml ceilometer/network/__init__.py ceilometer/network/floatingip.py ceilometer/network/notifications.py ceilometer/network/services/__init__.py ceilometer/network/services/base.py ceilometer/network/services/discovery.py ceilometer/network/services/fwaas.py ceilometer/network/services/lbaas.py ceilometer/network/services/vpnaas.py ceilometer/network/statistics/__init__.py ceilometer/network/statistics/driver.py ceilometer/network/statistics/flow.py ceilometer/network/statistics/port.py ceilometer/network/statistics/switch.py ceilometer/network/statistics/table.py ceilometer/network/statistics/opencontrail/__init__.py ceilometer/network/statistics/opencontrail/client.py ceilometer/network/statistics/opencontrail/driver.py ceilometer/network/statistics/opendaylight/__init__.py ceilometer/network/statistics/opendaylight/client.py ceilometer/network/statistics/opendaylight/driver.py ceilometer/objectstore/__init__.py ceilometer/objectstore/rgw.py ceilometer/objectstore/rgw_client.py ceilometer/objectstore/swift.py ceilometer/publisher/__init__.py ceilometer/publisher/direct.py ceilometer/publisher/file.py ceilometer/publisher/kafka_broker.py ceilometer/publisher/messaging.py ceilometer/publisher/test.py ceilometer/publisher/udp.py ceilometer/publisher/utils.py ceilometer/storage/__init__.py ceilometer/storage/base.py ceilometer/storage/impl_db2.py ceilometer/storage/impl_hbase.py ceilometer/storage/impl_log.py ceilometer/storage/impl_mongodb.py ceilometer/storage/impl_sqlalchemy.py ceilometer/storage/models.py ceilometer/storage/pymongo_base.py ceilometer/storage/hbase/__init__.py ceilometer/storage/hbase/base.py ceilometer/storage/hbase/inmemory.py ceilometer/storage/hbase/migration.py ceilometer/storage/hbase/utils.py ceilometer/storage/mongo/__init__.py ceilometer/storage/mongo/utils.py ceilometer/storage/sqlalchemy/__init__.py ceilometer/storage/sqlalchemy/migration.py ceilometer/storage/sqlalchemy/models.py ceilometer/storage/sqlalchemy/utils.py ceilometer/storage/sqlalchemy/migrate_repo/README ceilometer/storage/sqlalchemy/migrate_repo/__init__.py ceilometer/storage/sqlalchemy/migrate_repo/manage.py ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py 
ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py ceilometer/telemetry/__init__.py ceilometer/telemetry/notifications.py ceilometer/tests/__init__.py ceilometer/tests/base.py ceilometer/tests/db.py ceilometer/tests/mocks.py ceilometer/tests/pipeline_base.py ceilometer/tests/functional/__init__.py ceilometer/tests/functional/test_bin.py ceilometer/tests/functional/test_collector.py ceilometer/tests/functional/test_notification.py 
ceilometer/tests/functional/api/__init__.py ceilometer/tests/functional/api/v2/__init__.py ceilometer/tests/functional/api/v2/test_acl_scenarios.py ceilometer/tests/functional/api/v2/test_api_upgrade.py ceilometer/tests/functional/api/v2/test_app.py ceilometer/tests/functional/api/v2/test_capabilities.py ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py ceilometer/tests/functional/api/v2/test_event_scenarios.py ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py ceilometer/tests/functional/api/v2/test_statistics_scenarios.py ceilometer/tests/functional/gabbi/__init__.py ceilometer/tests/functional/gabbi/fixtures.py ceilometer/tests/functional/gabbi/gabbi_paste.ini ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml ceilometer/tests/functional/gabbi/test_gabbi.py ceilometer/tests/functional/gabbi/test_gabbi_prefix.py ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml ceilometer/tests/functional/gabbi/gabbits/basic.yaml ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml ceilometer/tests/functional/gabbi/gabbits/meters.yaml ceilometer/tests/functional/gabbi/gabbits/middleware.yaml ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml ceilometer/tests/functional/gabbi/gabbits/samples.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml ceilometer/tests/functional/hooks/post_test_hook.sh ceilometer/tests/functional/publisher/__init__.py ceilometer/tests/functional/publisher/test_direct.py ceilometer/tests/functional/storage/__init__.py ceilometer/tests/functional/storage/test_impl_db2.py ceilometer/tests/functional/storage/test_impl_hbase.py ceilometer/tests/functional/storage/test_impl_log.py ceilometer/tests/functional/storage/test_impl_mongodb.py ceilometer/tests/functional/storage/test_impl_sqlalchemy.py ceilometer/tests/functional/storage/test_pymongo_base.py ceilometer/tests/functional/storage/test_storage_scenarios.py ceilometer/tests/integration/__init__.py ceilometer/tests/integration/gabbi/__init__.py ceilometer/tests/integration/gabbi/test_gabbi_live.py ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json ceilometer/tests/integration/hooks/post_test_hook.sh ceilometer/tests/tempest/__init__.py ceilometer/tests/tempest/config.py ceilometer/tests/tempest/exceptions.py ceilometer/tests/tempest/plugin.py ceilometer/tests/tempest/api/__init__.py ceilometer/tests/tempest/api/base.py ceilometer/tests/tempest/api/test_telemetry_notification_api.py ceilometer/tests/tempest/scenario/__init__.py ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py ceilometer/tests/tempest/service/__init__.py ceilometer/tests/tempest/service/client.py ceilometer/tests/tempest/service/images/__init__.py 
ceilometer/tests/tempest/service/images/glance_http.py ceilometer/tests/tempest/service/images/v1/__init__.py ceilometer/tests/tempest/service/images/v1/images_client.py ceilometer/tests/tempest/service/images/v2/__init__.py ceilometer/tests/tempest/service/images/v2/images_client.py ceilometer/tests/unit/__init__.py ceilometer/tests/unit/test_coordination.py ceilometer/tests/unit/test_declarative.py ceilometer/tests/unit/test_decoupled_pipeline.py ceilometer/tests/unit/test_event_pipeline.py ceilometer/tests/unit/test_messaging.py ceilometer/tests/unit/test_middleware.py ceilometer/tests/unit/test_neutronclient.py ceilometer/tests/unit/test_neutronclient_lbaas_v2.py ceilometer/tests/unit/test_novaclient.py ceilometer/tests/unit/test_sample.py ceilometer/tests/unit/test_utils.py ceilometer/tests/unit/agent/__init__.py ceilometer/tests/unit/agent/agentbase.py ceilometer/tests/unit/agent/test_discovery.py ceilometer/tests/unit/agent/test_manager.py ceilometer/tests/unit/agent/test_plugin.py ceilometer/tests/unit/api/__init__.py ceilometer/tests/unit/api/test_app.py ceilometer/tests/unit/api/test_hooks.py ceilometer/tests/unit/api/test_versions.py ceilometer/tests/unit/api/v2/__init__.py ceilometer/tests/unit/api/v2/test_complex_query.py ceilometer/tests/unit/api/v2/test_query.py ceilometer/tests/unit/api/v2/test_statistics.py ceilometer/tests/unit/api/v2/test_wsme_custom_type.py ceilometer/tests/unit/compute/__init__.py ceilometer/tests/unit/compute/test_discovery.py ceilometer/tests/unit/compute/notifications/__init__.py ceilometer/tests/unit/compute/notifications/test_instance.py ceilometer/tests/unit/compute/pollsters/__init__.py ceilometer/tests/unit/compute/pollsters/base.py ceilometer/tests/unit/compute/pollsters/test_cpu.py ceilometer/tests/unit/compute/pollsters/test_diskio.py ceilometer/tests/unit/compute/pollsters/test_instance.py ceilometer/tests/unit/compute/pollsters/test_location_metadata.py ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer/tests/unit/compute/pollsters/test_net.py ceilometer/tests/unit/compute/virt/__init__.py ceilometer/tests/unit/compute/virt/hyperv/__init__.py ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py ceilometer/tests/unit/compute/virt/libvirt/__init__.py ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/__init__.py ceilometer/tests/unit/compute/virt/vmware/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py ceilometer/tests/unit/compute/virt/xenapi/__init__.py ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py ceilometer/tests/unit/dispatcher/__init__.py ceilometer/tests/unit/dispatcher/test_db.py ceilometer/tests/unit/dispatcher/test_dispatcher.py ceilometer/tests/unit/dispatcher/test_file.py ceilometer/tests/unit/dispatcher/test_gnocchi.py ceilometer/tests/unit/dispatcher/test_http.py ceilometer/tests/unit/energy/__init__.py ceilometer/tests/unit/energy/test_kwapi.py ceilometer/tests/unit/event/__init__.py ceilometer/tests/unit/event/test_converter.py ceilometer/tests/unit/event/test_endpoint.py ceilometer/tests/unit/event/test_trait_plugins.py ceilometer/tests/unit/hardware/__init__.py ceilometer/tests/unit/hardware/inspector/__init__.py ceilometer/tests/unit/hardware/inspector/test_inspector.py ceilometer/tests/unit/hardware/inspector/test_snmp.py ceilometer/tests/unit/hardware/pollsters/__init__.py ceilometer/tests/unit/hardware/pollsters/test_generic.py ceilometer/tests/unit/hardware/pollsters/test_util.py 
ceilometer/tests/unit/image/__init__.py ceilometer/tests/unit/image/test_glance.py ceilometer/tests/unit/ipmi/__init__.py ceilometer/tests/unit/ipmi/notifications/__init__.py ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py ceilometer/tests/unit/ipmi/notifications/test_ironic.py ceilometer/tests/unit/ipmi/platform/__init__.py ceilometer/tests/unit/ipmi/platform/fake_utils.py ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py ceilometer/tests/unit/ipmi/pollsters/__init__.py ceilometer/tests/unit/ipmi/pollsters/base.py ceilometer/tests/unit/ipmi/pollsters/test_node.py ceilometer/tests/unit/ipmi/pollsters/test_sensor.py ceilometer/tests/unit/meter/__init__.py ceilometer/tests/unit/meter/test_meter_plugins.py ceilometer/tests/unit/meter/test_notifications.py ceilometer/tests/unit/network/__init__.py ceilometer/tests/unit/network/test_floating_ip.py ceilometer/tests/unit/network/test_notifications.py ceilometer/tests/unit/network/services/__init__.py ceilometer/tests/unit/network/services/test_fwaas.py ceilometer/tests/unit/network/services/test_lbaas.py ceilometer/tests/unit/network/services/test_lbaas_v2.py ceilometer/tests/unit/network/services/test_vpnaas.py ceilometer/tests/unit/network/statistics/__init__.py ceilometer/tests/unit/network/statistics/test_driver.py ceilometer/tests/unit/network/statistics/test_flow.py ceilometer/tests/unit/network/statistics/test_port.py ceilometer/tests/unit/network/statistics/test_statistics.py ceilometer/tests/unit/network/statistics/test_switch.py ceilometer/tests/unit/network/statistics/test_table.py ceilometer/tests/unit/network/statistics/opencontrail/__init__.py ceilometer/tests/unit/network/statistics/opencontrail/test_client.py ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py ceilometer/tests/unit/network/statistics/opendaylight/__init__.py ceilometer/tests/unit/network/statistics/opendaylight/test_client.py ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py ceilometer/tests/unit/objectstore/__init__.py ceilometer/tests/unit/objectstore/test_rgw.py ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer/tests/unit/objectstore/test_swift.py ceilometer/tests/unit/publisher/__init__.py ceilometer/tests/unit/publisher/test_file.py ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py ceilometer/tests/unit/publisher/test_messaging_publisher.py ceilometer/tests/unit/publisher/test_udp.py ceilometer/tests/unit/publisher/test_utils.py ceilometer/tests/unit/storage/__init__.py ceilometer/tests/unit/storage/test_base.py ceilometer/tests/unit/storage/test_get_connection.py ceilometer/tests/unit/storage/test_models.py ceilometer/tests/unit/storage/sqlalchemy/__init__.py ceilometer/tests/unit/storage/sqlalchemy/test_models.py ceilometer/tests/unit/telemetry/__init__.py ceilometer/tests/unit/telemetry/test_notifications.py ceilometer/tests/unit/transformer/__init__.py ceilometer/tests/unit/transformer/test_conversions.py ceilometer/transformer/__init__.py ceilometer/transformer/accumulator.py ceilometer/transformer/arithmetic.py ceilometer/transformer/conversions.py devstack/README.rst devstack/apache-ceilometer.template devstack/plugin.sh devstack/settings devstack/files/rpms/ceilometer devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/Makefile doc/source/1-agents.png doc/source/2-1-collection-notification.png 
doc/source/2-2-collection-poll.png doc/source/2-accessmodel.png doc/source/3-Pipeline.png doc/source/4-Transformer.png doc/source/5-multi-publish.png doc/source/6-storagemodel.png doc/source/architecture.rst doc/source/ceilo-arch.png doc/source/ceilo-gnocchi-arch.png doc/source/conf.py doc/source/configuration.rst doc/source/contributing.rst doc/source/events.rst doc/source/format.rst doc/source/glossary.rst doc/source/gmr.rst doc/source/index.rst doc/source/measurements.rst doc/source/new_meters.rst doc/source/overview.rst doc/source/plugins.rst doc/source/testing.rst doc/source/_templates/.placeholder doc/source/api/index.rst doc/source/install/dbreco.rst doc/source/install/development.rst doc/source/install/index.rst doc/source/install/manual.rst doc/source/install/mod_wsgi.rst doc/source/install/upgrade.rst doc/source/releasenotes/folsom.rst doc/source/releasenotes/index.rst doc/source/webapi/index.rst doc/source/webapi/v2.rst etc/apache2/ceilometer etc/ceilometer/README-ceilometer.conf.txt etc/ceilometer/api_paste.ini etc/ceilometer/ceilometer-config-generator.conf etc/ceilometer/event_definitions.yaml etc/ceilometer/event_pipeline.yaml etc/ceilometer/gnocchi_resources.yaml etc/ceilometer/pipeline.yaml etc/ceilometer/policy.json etc/ceilometer/rootwrap.conf etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml etc/ceilometer/examples/osprofiler_event_definitions.yaml etc/ceilometer/rootwrap.d/ipmi.filters rally-jobs/README.rst rally-jobs/ceilometer.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst rally-jobs/plugins/plugin_sample.py releasenotes/notes/.placeholder releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml releasenotes/notes/always-requeue-7a2df9243987ab67.yaml releasenotes/notes/batch-messaging-d126cc525879d58e.yaml releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml releasenotes/notes/event-type-race-c295baf7f1661eab.yaml releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml releasenotes/notes/keystone-v3-fab1e257c5672965.yaml releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml releasenotes/notes/remove-eventlet-6738321434b60c78.yaml releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml 
releasenotes/notes/support-None-query-45abaae45f08eda4.yaml releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/__init__.py tools/ceilometer-test-event.py tools/lintstack.py tools/lintstack.sh tools/make_test_data.py tools/make_test_data.sh tools/make_test_event_data.py tools/pretty_tox.sh tools/send_test_data.py tools/show_data.py tools/test_hbase_table_utils.py

ceilometer-6.1.5/ceilometer.egg-info/requires.txt

retrying!=1.3.0,>=1.2.3
jsonpath-rw-ext>=0.1.9
jsonschema!=2.5.0,<3.0.0,>=2.0.0
kafka-python<1.0.0,>=0.9.5
keystonemiddleware!=4.1.0,>=4.0.0
lxml>=2.3
msgpack-python>=0.4.0
oslo.context>=0.2.0
oslo.db>=4.1.0
oslo.concurrency>=3.5.0
oslo.config>=3.7.0
oslo.i18n>=2.1.0
oslo.log>=1.14.0
oslo.policy>=0.5.0
oslo.reports>=0.6.0
oslo.rootwrap>=2.0.0
oslo.service>=1.0.0
PasteDeploy>=1.5.0
pbr>=1.6
pecan>=1.0.0
oslo.messaging>=4.0.0,<=5.17.1
oslo.middleware>=3.0.0
oslo.serialization>=1.10.0
oslo.utils>=3.5.0
pysnmp<5.0.0,>=4.2.3
python-ceilometerclient>=2.2.1
python-glanceclient>=2.0.0
python-keystoneclient!=1.8.0,!=2.1.0,<3.0.0,>=1.6.0
keystoneauth1>=2.1.0
python-neutronclient!=4.1.0,>=2.6.0
python-novaclient!=2.33.0,>=2.29.0,<7.0.0
python-swiftclient>=2.2.0
PyYAML>=3.1.0
requests!=2.9.0,>=2.8.1
six>=1.9.0
SQLAlchemy<1.1.0,>=1.0.10
sqlalchemy-migrate>=0.9.6
stevedore>=1.5.0
tooz>=1.28.0
Werkzeug>=0.7
WebOb>=1.2.3,<1.7.0
WSME>=0.8
python-dateutil>=2.4.2

ceilometer-6.1.5/ceilometer.egg-info/top_level.txt

ceilometer

ceilometer-6.1.5/ceilometer.egg-info/pbr.json

{"git_version": "e18f209", "is_release": true}
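The requirement lines above are PEP 440 specifiers: clauses are comma-separated and must all hold, so "retrying!=1.3.0,>=1.2.3" sets a version floor while excluding one known-bad release. Below is a minimal sketch of how such a pin evaluates, assuming the third-party "packaging" library is available (pip and pbr apply equivalent logic internally); the candidate versions probed are arbitrary examples, not values taken from this file.

    # Sketch: evaluate one requires.txt-style specifier with "packaging".
    from packaging.requirements import Requirement

    req = Requirement("retrying!=1.3.0,>=1.2.3")
    for candidate in ("1.2.2", "1.2.3", "1.3.0", "1.3.1"):
        # Every clause must pass: >=1.2.3 is the floor, !=1.3.0 the exclusion.
        print(candidate, req.specifier.contains(candidate))
    # -> 1.2.2 False, 1.2.3 True, 1.3.0 False, 1.3.1 True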
ceilometer-6.1.5/ceilometer.egg-info/entry_points.txt

[ceilometer.builder.poll.central]
hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster

[ceilometer.compute.virt]
hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector

[ceilometer.discover]
endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery
fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
fw_services = ceilometer.network.services.discovery:FirewallDiscovery
ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery
lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery
lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery
lb_members = ceilometer.network.services.discovery:LBMembersDiscovery
lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery
lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery
local_instances = ceilometer.compute.discovery:InstanceDiscovery
local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery
tenant = ceilometer.agent.discovery.tenant:TenantDiscovery
tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery

[ceilometer.dispatcher.event]
database = ceilometer.dispatcher.database:DatabaseDispatcher
file = ceilometer.dispatcher.file:FileDispatcher
http = ceilometer.dispatcher.http:HttpDispatcher

[ceilometer.dispatcher.meter]
database = ceilometer.dispatcher.database:DatabaseDispatcher
file = ceilometer.dispatcher.file:FileDispatcher
gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
http = ceilometer.dispatcher.http:HttpDispatcher

[ceilometer.event.publisher]
direct = ceilometer.publisher.direct:DirectPublisher
kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
notifier = ceilometer.publisher.messaging:EventNotifierPublisher
test = ceilometer.publisher.test:TestPublisher

[ceilometer.event.storage]
db2 = ceilometer.event.storage.impl_db2:Connection
es = ceilometer.event.storage.impl_elasticsearch:Connection
hbase = ceilometer.event.storage.impl_hbase:Connection
log = ceilometer.event.storage.impl_log:Connection
mongodb = ceilometer.event.storage.impl_mongodb:Connection
mysql = ceilometer.event.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection

[ceilometer.event.trait_plugin]
bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
split = ceilometer.event.trait_plugins:SplitterTraitPlugin
timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin

[ceilometer.hardware.inspectors]
snmp = ceilometer.hardware.inspector.snmp:SNMPInspector

[ceilometer.metering.storage]
db2 = ceilometer.storage.impl_db2:Connection
hbase = ceilometer.storage.impl_hbase:Connection
log = ceilometer.storage.impl_log:Connection
mongodb = ceilometer.storage.impl_mongodb:Connection
mysql = ceilometer.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.storage.impl_sqlalchemy:Connection

[ceilometer.notification]
_sample = ceilometer.telemetry.notifications:TelemetryIpc
floatingip = ceilometer.network.notifications:FloatingIP
hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification
hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
http.request = ceilometer.middleware:HTTPRequest
http.response = ceilometer.middleware:HTTPResponse
instance = ceilometer.compute.notifications.instance:Instance
instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
meter = ceilometer.meter.notifications:ProcessMeterNotifications
network = ceilometer.network.notifications:Network
network.services.firewall = ceilometer.network.notifications:Firewall
network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy
network.services.firewall.rule = ceilometer.network.notifications:FirewallRule
network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor
network.services.lb.member = ceilometer.network.notifications:Member
network.services.lb.pool = ceilometer.network.notifications:Pool
network.services.lb.vip = ceilometer.network.notifications:Vip
network.services.vpn = ceilometer.network.notifications:VPNService
network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
port = ceilometer.network.notifications:Port
router = ceilometer.network.notifications:Router
subnet = ceilometer.network.notifications:Subnet

[ceilometer.poll.central]
energy = ceilometer.energy.kwapi:EnergyPollster
image = ceilometer.image.glance:ImagePollster
image.size = ceilometer.image.glance:ImageSizePollster
ip.floating = ceilometer.network.floatingip:FloatingIPPollster
network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster
network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster
network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster
network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster
network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster
network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster
network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster
network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster
network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster
network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster
network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster
network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster
network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster
network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster
power = ceilometer.energy.kwapi:PowerPollster
rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster
rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster
rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster
rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster
rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster
rgw.usage = ceilometer.objectstore.rgw:UsagePollster
storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster
storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster
storage.objects = ceilometer.objectstore.swift:ObjectsPollster
storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster
storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster
switch = ceilometer.network.statistics.switch:SWPollster
switch.flow = ceilometer.network.statistics.flow:FlowPollster
switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes
switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds
switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds
switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets
switch.port = ceilometer.network.statistics.port:PortPollster
switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount
switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes
switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors
switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops
switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors
switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors
switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors
switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets
switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes
switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops
switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors
switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets
switch.table = ceilometer.network.statistics.table:TablePollster
switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries
switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets
switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets

[ceilometer.poll.compute]
cpu = ceilometer.compute.pollsters.cpu:CPUPollster
cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster
disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster
disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster
disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster
disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster
disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster
disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster
disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster
disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster
disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster
disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster
disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster
disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster
disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster
disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster
disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster
disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster
disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster
disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster
disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster
disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster
disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
instance = ceilometer.compute.pollsters.instance:InstancePollster
memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster
network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster
network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster

[ceilometer.poll.ipmi]
hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster
hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster
hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster
hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster
hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster
hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster
hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster
hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster
hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster
hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster
hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster
hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster

[ceilometer.publisher]
direct = ceilometer.publisher.direct:DirectPublisher
file = ceilometer.publisher.file:FilePublisher
kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
test = ceilometer.publisher.test:TestPublisher
udp = ceilometer.publisher.udp:UDPPublisher

[ceilometer.transformer]
accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
aggregator = ceilometer.transformer.conversions:AggregatorTransformer
arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer
delta = ceilometer.transformer.conversions:DeltaTransformer
rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer
unit_conversion = ceilometer.transformer.conversions:ScalingTransformer

[console_scripts]
ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
ceilometer-api = ceilometer.cmd.api:main
ceilometer-collector = ceilometer.cmd.collector:main
ceilometer-dbsync = ceilometer.cmd.storage:dbsync
ceilometer-expirer = ceilometer.cmd.storage:expirer
ceilometer-polling = ceilometer.cmd.polling:main
ceilometer-rootwrap = oslo_rootwrap.cmd:main
ceilometer-send-sample = ceilometer.cmd.sample:send_sample

[keystoneauth1.plugin]
password-ceilometer-legacy = ceilometer.keystone_client:LegacyCeilometerKeystoneLoader

[network.statistics.drivers]
opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver
opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver

[oslo.config.opts]
ceilometer = ceilometer.opts:list_opts

[oslo.config.opts.defaults]
ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults

[tempest.test_plugins]
ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin
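The sections above are setuptools entry points, and they are how every pluggable piece of ceilometer (pollsters, publishers, storage drivers, transformers) is discovered at runtime. A minimal sketch of loading them with stevedore, which requires.txt already pulls in; the namespace and names come straight from the [ceilometer.publisher] section above, and the snippet assumes ceilometer 6.1.5 is installed in the current environment.

    # Sketch: enumerate and resolve entry points via stevedore.
    from stevedore import driver, extension

    # Everything registered under [ceilometer.publisher].
    mgr = extension.ExtensionManager(namespace='ceilometer.publisher')
    print(sorted(mgr.names()))
    # -> ['direct', 'file', 'kafka', 'notifier', 'test', 'udp']

    # Resolve one named plugin; invoke_on_load=False yields the class
    # itself, since publisher constructors expect a parsed pipeline URL.
    udp = driver.DriverManager(namespace='ceilometer.publisher',
                               name='udp', invoke_on_load=False)
    print(udp.driver)  # ceilometer.publisher.udp.UDPPublisher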
ceilometer-6.1.5/ceilometer.egg-info/PKG-INFO

Metadata-Version: 1.1
Name: ceilometer
Version: 6.1.5
Summary: OpenStack Telemetry
Home-page: http://docs.openstack.org/developer/ceilometer/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ceilometer
        ==========

        Release notes can be read online at:
        http://docs.openstack.org/developer/ceilometer/releasenotes/index.html

        Documentation for the project can be found at:
        http://docs.openstack.org/developer/ceilometer/

        The project home is at:
        http://launchpad.net/ceilometer
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Topic :: System :: Monitoring
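PKG-INFO and pbr.json are generated at build time; nothing in the package reads them itself, but they can be inspected programmatically. A rough sketch, assuming the sdist has been installed (so pkg_resources from setuptools can find it) and the interpreter is started from the unpacked source root:

    # Sketch: read the generated package metadata shown above.
    import json
    import pkg_resources

    dist = pkg_resources.get_distribution('ceilometer')
    print(dist.version)  # '6.1.5' for this sdist
    # For an egg-info install the PKG-INFO file holds the fields above.
    print(dist.get_metadata('PKG-INFO').splitlines()[1])  # 'Name: ceilometer'

    # pbr.json records what pbr saw in git when the sdist was built.
    with open('ceilometer.egg-info/pbr.json') as f:
        print(json.load(f))  # {'git_version': 'e18f209', 'is_release': True}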
scnario tests from tempest * mitaka-3 release notes * Adjust log levels for InstanceShutOffException * Fix event_type creationg failure due to race condition * Imported Translations from Zanata * Ignoring cpu measurement when instance's state is SHUTOFF * Add validation for polling_namespaces option * xenapi: support the session when xenserver is slave * Imported Translations from Zanata * gnocchi dispatch: Added new resource type support * remove wrong "#!/usr/bin/env python" header * Fixed corner cases of incorrect use of oslo.config * Updated from global requirements * timedelta plugin for meter definition process * Cast Int64 values to int, float in statistics * Cache getters for the decalarative definitions 6.0.0.0b3 --------- * [sahara] add events definitions regarding new notifications * Moved CORS middleware configuration into oslo-config-generator * Add the meter example file 'lbaas-v2-meter-definitions.yaml' * Change default policy to allow create_samples * Enable the Load Balancer v2 events * Remove unused pngmath Sphinx extension * Updated from global requirements * Fix a minor missing parameter issue * close services in test * Add an update interval to compute discovery * Docs: Configure meters/events dispatch separately * Fix the typo in the gnocchiclient exception * Updated from global requirements * Add gnocchi dispatcher opts to config * Change the SERVICE_TENANT_NAME to SERVICE_PROJECT_NAME * Hyper-V: replaces in-tree hyper-v utils usage with os_win * Initial seed of hacking * Add /usr/local/{sbin,bin} to rootwrap exec_dirs * Gnocchi: fix ResourcesDefinitionException for py3 * Change LOG.warn to LOG.warning * tests: fix unworking debug output * Adds timestamp option to Aggregation transformer * remove default=None for config options * Replace assertEqual(None, *) with assertIsNone in tests * Trivial: Cleanup unused conf variables * Enable the Load Balancer v2 for the Ceilometer(Part Two) * Remove unused variable * Enable the Load Balancer v2 for the Ceilometer(Part One) * Fix footnote reference to Aodh in docs * Updated from global requirements * Set None explicitly to filter options * KEYSTONE_CATALOG_BACKEND is deprecated * Use overtest to setup functional backends * devstack: Fix Keystone v3 configuration typo * Imported Translations from Zanata * Handle malformed resource definitions gracefully * Update the home page * Skip duplicate meter definitions * set higher batching requirement * use retrying to attempt to rejoin group * network: remove deprecated option name * sample: remove deprecated option name * Fix wrong capitalization * rewriting history * Remove unused pytz requirement * devstack: use password with version discovery * fix tempest path * Updated from global requirements * raise coordination error if not registered * do not configure worker specific items in init * integration-gate: fix publicURL retrieval * rolling upgrades * fix locking in ceilometer * enable notification agent partitioning * better support notification coordination * remove useless notification listener helper * Lookup meter definition fields correctly * Enhances get_meters to return unique meters * Imported Translations from Zanata * Updated from global requirements * Fix ceilometer floatingip pollster * Updated from global requirements * tempest: migrate base class for tests * tempest: add ceilometer tempest plugin * tempest: add telemetry client manager * tempest: migrate conf.py from tempest tree * tempest: copy telemetry client from tempest tree * Fix events rbac 6.0.0.0b2 
--------- * Don't store events with Gnocchi * add additional mitaka-2 release notes * Corrects typo "a other" -> "another" * Updated from global requirements * add release notes for mitaka-2 * devstack: add support for Gnocchi backend * notification: Use oslo.messaging batch listener * Cleanup of Translations * Added CORS support to Ceilometer * Don't set keystonemiddleware cache * Set None explicitly to filter options * Add OSprofiler-specific events definitions * collector: Use oslo.messaging batch listener * Updated from global requirements * Changes aggregator transformer to allow retention_time w/o size * Replace LOG.warn with LOG.warning * Updated from global requirements * wrong accumulative value of "network.services.lb.incoming.bytes" * Trivial: Remove vim header from source files * Trival: Remove unused logging import * Fix the typos in the source code * gnocchi: fix stack resource type * Misspelling in message * Clean pagination related methods of impl_mongodb * Fix some typos in the snmp.py * remove local hacking check * [MongoDB] add indexes in event collection * Remove unused code in gnocchi dispatcher * remove unnecessary code * recheck cache after acquired gnocchi_resource_lock * collector: remove deprecated RPC code * fix case in function name * Catch the EndpointNotFound in keystoneauth1 than in keystoneclient * Log exception if stevedore fails to load module * Updated from global requirements * Revert "Revert "devstack config for dogpile cache"" * add per resource lock * verify gnocchi connection before processing * [refactor] remove redundant import of options * Added unit test cases for pysnmp 4.3 * Add keystoneauth1 in requirements * gnocchi: fix cache hash logic * gnocchi: use gnocchiclient instead of requests * show queue status on integration test * Updated from global requirements * using a consistent uuid as cache namespace * Duplicate information link for writing agent plugins * Use keystoneauth1 instead of manual setup * Do not mock the memcache interface for auth_token * oslo.messaging option group/name change for notification topics * Correct the host field of instance metadata * fix the bug that gnocchi dispatcher can't process single sample * Replace stackforge with openstack * MAINTAINERS: remove outdated data 6.0.0.0b1 --------- * Remove version from setup.cfg * add initial release notes * fix functional gate * messaging: stop using RequestContextSerializer * Fix ceilometer-test-event.py script * Deduplicate the code about snmp meter loading * Updated from global requirements * Revert "devstack config for dogpile cache" * Revert "Workaround requests/urllib connection leaks" * add cpu.delta to gnocchi resources * simplify collector cache * Consistent publisher_id for polling agent * build metric list on init * re-implement thread safe fnmatch * clean up integration test urls * tools: fix default resource metadata for instance * don't pass ceilometer options to oslo.db engine facade * Use str(name) instead of name.prettyPrint() * Reduce code duplication * remove config files when run clean.sh * fix some test case wrongly skipped for mysql backend * Add WebTest to test-requirements.txt * tests: remove testscenario usage for storage drivers * Remove eventlet usage * Remove alarming code * Clarify the doc about multiple notification_topics usage * Reduced source code by extracting duplicated code * devstack config for dogpile cache * Updated from global requirements * Updated from global requirements * Fix an indent nit of enforce_limit method * Move the 
content of ReleaseNotes to README.rst * use common cache * A dogpile cache of gnocchi resources * Updated from global requirements * install database when collector is enabled * Updated from global requirements * Updated from global requirements * add reno for release notes management * Updated from global requirements * Support to get hardware's cpu_util from snmp * add rohit_ to MAINTAINERS * gnocchi: set the default archive policy to None * Mv gabbi_pipeline.yaml into test directories * Factorize yaml loading of declarative stuffs * Factorize field definition of declarative code * Wrong result is returned when call events getting API * tox: use pretty_tox in most places * Updated from global requirements * avoid unnecessary inner join in get_resources() for SQL backend * Add sql-expire-samples-only to option list * Updated from global requirements * configure Apache only when ceilometer-api is enabled * Imported Translations from Zanata * avoid using isolation level * specify runtime environment for scripts * Using oslo-config-generator to instead of generate-config-file.sh * Use gnocchiclient for integration script * Enable signature verification for events * Correct the timestamp type when make test samples data * Updated from global requirements * avoid generate temporary table when query samples * Reject posting sample with direct=true if Gnocchi is enabled * make script under tools directory executable * Updated from global requirements * Added the README.rst in devstack folder * fix tools/make_test_event_data.py * fix image_ref attr in gnocchi resource * support mysql+pymysql in functional test * Updated from global requirements * Fix snmp pollster to not ignore valid meters * Block oslo.messaging 2.6.1 release * reset policy per test * Remove dependency on sphinxcontrib-docbookrestapi * gnocchi: remove possible ending / in URL * api: simplify root controller * api: simplify Pecan config * remove instance:FLAVOR related code and docs * Do collector setup and storage cleanup for all backends * change collector_workers to [collector]workers * Enable POST samples API when gnocchi enabled * devstack: fix debug info for Gnocchi * Imported Translations from Zanata * Add Liberty release note link * Fix make_test_data.sh * Imported Translations from Zanata * Be explicit when copying files to /etc/ceilometer * Deprecate event trait plugin 'split' * Updated from global requirements * Clean some log messages when polling neutron resources * Simplify the validation of required fields of pipeline source * doc: service enablement not necessary when using Devstack plugin * Skip bad meter definitions instead of erroring out * Remove the unused network_get_all method * mark logging.info translation accordingly * logging cleanup * Updated from global requirements * Remove last vestiges of devstack from grenade plugin * Add missing ceilometerclient repo location 5.0.0 ----- * Imported Translations from Zanata * Fix for resource polling warnings * SQL: Fix event-list with multiple trait query filters * Fix the bug of "Error spelling of a word" * Imported Translations from Zanata * SQL: Fix event-list with multiple trait query filters * Fix a mistake in a test * Configure collector to only record meter or event * Rename list_events tests to list_samples tests * fix elasticsearch script reference * Fix the deprecation note in meter.yaml * Fix the deprecation note in meter.yaml * Remove deprecated archive policy map for Gnocchi * Remove enable_notification.sh * Parametrize table_prefix_separator in 
hbase * Imported Translations from Zanata * fix typo in storage/impl_sqlalchemy * devstack: install all configuration files from etc/ * dispatcher: remove deprecated CADF code in HTTP * mongodb: remove deprecated replica_set support * Ensure the test data sample has correct signature * Open Mitaka development 5.0.0.0rc1 ---------- * gnocchi: Don't raise NotImplementedError * Add missing meter and exchange opts * Imported Translations from Zanata * Add test to cover history rule change * Workaround requests/urllib connection leaks * integration tests: additional debugging infos * Coordinator handles ToozError when joining group * Don't create neutron client at loadtime * Delete its corresponding history data when deleting an alarm * update event filter test to validate multiple trait args * Fix variable typos * Updated from global requirements * Change ignore-errors to ignore_errors * Fix reconnecting to libvirt * remove batch processing requirement from arithmetic transformer * Cleanup empty dirs from tests * retain existing listeners on refresh * Override dispatcher option for test_alarm_redirect_keystone * [ceilometer] Update links to Cloud Admin Guide * Adds support for dynamic event pipeline * Updated from global requirements * Imported Translations from Zanata * pollster/api now publish to sample queue * tox: generate config file on test run * tox: Allow to pass some OS_* variables * Refactor keystone handling in discovery manager * Use make_sample_from_instance for net-pollster * apply limit constraint on storage base interface * gnocchi: add two new resources * Fixed tox -egenconfig Error * Add declarative meters to developer docs * add delta transfomer support * do not recreate main queue listeners on partitioning * Validate required fields in meter definition * deprecate cadf_only http dispatcher * Fix the heavy time cost of event-list * Update API Doc to deprecate the alarming part * Deprecate config options of the old alarming functionality * update architecture documentation * Add attribute 'state' to meter metadata when source is polling * doc: update devstack usage * Remove useless base class * Split out image non-meters * Make the gabbi tox target work with modern tox * Avoid 500 errors when duplicating limit queries * Correct test_list_meters_meter_id to work with py3 * Updated from global requirements * Update event_definitions for Cinder Image Cache * Update install docs * Use b64encode to replace of encodestring * Prevent ceilometer expirer from causing deadlocks * remove duplicate log exception message * Spelling mistake of comment in api/controllers/v2/query.py * Fix typos in gnocchi.py and converter.py * Updated from global requirements * Updated from global requirements * Add a py34-functional tox target * doc: update notification_driver * polling: remove deprecated agents * Fix string in limit warning * Typo fixing * missed entrypoint for nova_notifier removal * Imported Translations from Transifex * Fix links in README.rst * integration: Add debugging information * deprecate db2 nosql driver * devstack: add new option to support event-alarm * Sync devstack plugin with devstack:lib/ceilometer * Updated from global requirements * remove old nova_notifier processing code 5.0.0.0b3 --------- * restrict admin event access * Migrate the old snmp pollsters to new declarative pollster * Support to load pollsters extensions at runtime * Added snmp declarative hardware pollster * Requeuing event with workload_partitioning on publish failure * Event filtering for non-admin 
users * integration: fix typo * gnocchi: cleanup instance resource definition * Updated from global requirements * Adding pradk to MAINTAINERS * Adding liusheng to MAINTAINERS * Add index to metadata_hash column of resource table * Incorrect Links are updated * Removing unused dependency: discover * Use new location of subunit2html * Change tox default targets for quick use * Fixed identity trust event types * gnocchi: quote the resource_id in url * fix metadata for compute cpu notifications * support custom metadata * Move profiler meters to yaml * Control Events RBAC from policy.json * Events RBAC needs scoped token * make telemetry sample payloads dictionaries * Fix requeue process on event handling error * allow configurable pipeline partitioning * Keep the instance_type meta from polling and notification consistent * Add user_id,project_id traits to audit events * Change json path's to start with $. for consistency * Add validation tests for arithmetic, string and prefix expressions * Fix description for "Inapt spelling of 'MongoDB'" * Create conf directory during devstack install phase * support custom timestamp * Add cpu meters to yaml * Fix description for "Incorrect spelling of a word" * integration: add some new tests * Fix disable_non_metric_meters referencing * Update tests to reflect WSME 0.8 fixes * remove jsonpath-rw requirement * Do not use system config file for test * gnocchi: move to jsonpath_rw_ext * Updated from global requirements * Allow to run debug tox job for functional tests * Use jsonpath_rw_ext for meter/event definitions * preload jsonpath_rw parsers * integration test: adjusts timeout * integration test: failfast * Updated from global requirements * Avoid recording whole instance info in log * Fix dependency for doc build * Mark record_type in PaaS Event Format doc as optional * full multi-meter support * add flexible grouping key * Corrected test_fallback_meter_path test case * Add hypervisor inspector sanity check * handle list payloads in notifications * xenapi: support the session to "unix://local" * Introduce Guru Meditation Reports into Ceilometer * Use start status of coodinator in tooz * Fixed event requeuing/ack on publisher failure * Implement consuming metrics from Magnum * Avoid from storing samples with empty or not numerical volumes * use union all when building trait query * Fixed spelling error, retreive -> retrieve * Use min and max on IntOpt option types * Update install docs with gnocchi dispatcher info * Make it possible to run postgresql functional job * Revert "Remove version from os_auth_url in service_credentials" * Updated from global requirements * Use oslo_config PortOpt support * integration: chown ceilometer directory properly * add mandatory limit value to complex query list * add test to validate jsonpath * Remove version from os_auth_url in service_credentials * do not translate debug logs * Updated from global requirements * Grenade plugin using devstack plugin for ceilometer * remove alembic requirement * Convert instance, bandwidth and SwiftMiddleware meters * Change and move the workers options to corresponding service section * Drop the downgrade function of migration scripts * start rpc deprecation * support multiple-meter payloads * add poll history to avoid duplicate samples * Add Kilo release note reference * initialise opencontrail client in tests * Make ConnectionRetryTest more reliable * Correct thread handling in TranslationHook * Updated from global requirements * Correctly intialized olso config fixture for 
TestClientHTTPBasicAuth * Don't start up mongodb for unit test coverage * disable non-metric meter definitions * Cast Int64 values to float * Convert identity, sahara and volume to meters yaml * Enable entry points for new declarative meters * Fix for rgw still throwing errors * group pollsters by interval * Revert "Revert "remove instance: meter"" * api: fix alarm deletion and update * Fixes the kafka publisher * Sync devstack plugin with devstack:lib/ceilometer * integration: use the right user in gate * Imported Translations from Transifex * Initial separating unit and functional tests * Stop using openstack.common from keystoneclient * minimise scope of hmac mocking * Updated from global requirements * gnocchi: retry with a new token on 401 * Fix some gabbi tests * Improve comments in notification.py * mongo: fix last python3 bugs * postgres isolation level produces inconsistent reads * Masks messaging_urls in logs during debug mode * Corrected unit of snmp based harware disk and memory meters * Provide base method for inspect_memory_resident * Fix Python 3 issue in opendaylight client * Fix more tests on Python 3 * Remove the compute inspector choice restriction * [MongoDB] Refactor indexes for meter and resources * tests: add an integration test * Fix WSGI replacement_start_response() on Python 3 * gnocchi: reduce the number of patch to gnocchi API * Make the partition coordinator log more readable * Drop out-of-time-sequence rate of change samples 5.0.0.0b2 --------- * [MongoDB] Use a aggregate pipeline in statistics * Instance Cache in Node Discovery Pollster * Instance Caching * Imported Translations from Transifex * fix gnocchi resources yaml * Import the api opt group in gabbi fixture * Add a batch_polled_samples configuration item * Remove redundant comma * storage: deprecates mongodb_replica_set option * Improves send_test_data tools * Replace isotime() with utcnow() and isoformat() * distributed coordinated notifications * Imported Translations from Transifex * Close and dispose test database setup connections * Updated from global requirements * api: Redirect request to aodh if available * api: return 410 if only Gnocchi is enabled * Fix broken IPMI agent * add mandatory limit value to meter list * add mandatory limit value to resource list * add mandatory limit value to event list * Move gnocchi resources definition in yaml file * Send a notification per sample, do not batch * Handles dns.domain.exists event in Ceilometer * Pollsters now send notifications without doing transforms * Imported Translations from Transifex * Switch to the oslo_utils.fileutils * Updated from global requirements * Use choices for hypervisor_inspector option * The product name Vsphere should be vSphere * Add necessary executable permission * Store and restore the xtrace option in devstack plugin * gnocchi: Remove useless resources patching * add Trove(DBaaS) events * Set conf.gnocchi_dispatcher.url explicitly in tests * Declarative meters support * Stop the tests if backend hasn't started * Delay the start of the collector until after apache restart * Clean the re-implemented serializers in Ceilometer * monkey_patch thread in tests * make notifier default event publisher * Fix gnocchi DispatcherTest tests * Sort metric data before grouping and processing * Namespace functions in devstack plugin * Added valid values of operator to response body * gnocchi: fixes the instance flavor type * gnocchi dispatcher: fix typo in stevedore endpoint * Imported Translations from Transifex * Tolerate alarm 
* Make ceilometer work correctly when hosted with a SCRIPT_NAME
* Implementation of dynamically reloadable pipeline
* fix log msg typo in api utils
* Updated from global requirements
* Add documentation about the usage of api-no-pipline
* drop deprecated pipeline
* Improve doc strings after changing method for index creation
* set default limit to meter/sample queries
* collector: fix test raising error
* Remove test-requirements-py3.txt
* remove unused event query
* Create a devstack plugin for ceilometer
* Add support for posting samples to notification-agent via API
* restore long uuid data type
* Revert "Add support for posting samples to notification-agent via API"
* Update alarm history only if change in alarm property
* test error log - catch dummy error
* fix kafka tests from flooding logs
* catch warnings from error tests
* remove unused notifier
* Add support for posting samples to notification-agent via API
* Stop dropping deprecated tables while upgrade in mongodb and db2
* Add handler of sample creation notification
* Remove the unused get_targets method of plugin base
* Replaces methods deprecated in pymongo3.0
* add oslo.service options
* Restricts pipeline to have unique source names
* drop use of oslo.db private attribute
* Fix oslo.service configuration options building
* Add fileutils to openstack-common.conf
* disable non-metric meters

5.0.0.0b1
---------

* Remove unnecessary executable permission
* Imported Translations from Transifex
* Switch to oslo.service
* Remove unnecessary wrapping of transformer ExtentionManager
* Port test_complex_query to Python 3
* Fix expected error message on Python 3
* Fix usage of iterator/list on Python 3
* Replaces ensure_index for create_index
* pip has its own download cache by default
* For sake of future python3 encode FakeMemcache hashes
* Make acl_scenarios tests' keystonemiddleware cache work flexibly
* Update version for Liberty
* Gnocchi Dispatcher support in Ceilometer

5.0.0a0
-------

* Updated from global requirements
* Fix alarm rest notifier logging to include severity
* Remove useless execute bit on rst file
* Fix unicode/bytes issues in API v2 tests
* Fix script name in tox.ini for Elasticsearch
* Fix the meter unit types to be consistent
* tests: use policy_file in group oslo_policy
* Fix publisher test_udp on Python 3
* Fix Ceph object store tests on Python 3
* Port IPMI to Python 3
* Port middleware to Python 3
* [elasticsearch] default trait type to string
* Updated from global requirements
* Lower down the range for columns which are being used as uuid
* Sync with latest oslo-incubator
* Fix testing of agent manager with tooz
* Remove deprecated Swift middleware
* add DNS events
* Handle database failures on api startup
* Fix more tests on Python 3
* Switch to using pbr's autodoc capability
* Remove old oslo.messaging aliases
* Remove useless versioninfo and clean ceilometer.conf git exclusion
* Register oslo_log options before using them
* Add running functional scripts for defined backend
* Remove snapshot.update events as they are not sent
* WSME version >=0.7 correctly returns a 405
* TraitText value restricted to max length 255
* Cause gabbi to skip on no storage sooner
* Updated from global requirements
* Move eventlet using commands into own directory
* adjust alarm post ut code to adapt to upstream wsme
* Disable rgw pollster when aws module not found
* Fixes DiskInfoPollster AttributeError exception
* remove useless log message
* use oslo.log instead of oslo-incubator code
* Port test_inspector to Python 3
* Fix usage of dictionary methods on Python 3
* Imported Translations from Transifex
* Add oslo.vmware to Python 3 test dependencies
* Optionally create trust for alarm actions
* Remove iso8601 dependency
* Enable test_swift_middleware on Python 3
* Enable more tests on Python 3
* Skip hbase tests on Python 3
* Clear useless exclude from flake8 ignore in tox
* Remove pagination code
* Stop importing print_function
* Remove useless release script in tools
* Remove useless dependency on posix_ipc
* Remove exceute bit on HTTP dispatcher
* Remove oslo.messaging compat from Havana
* Fixing event types pattern for Role Noti. handler
* Mask database.event_connection details in logs
* Switch from MySQL-python to PyMySQL
* Python 3: replace long with int
* Python 3: Replace unicode with six.text_type
* Python 3: generalize the usage of the six module
* Update Python 3 requirements
* Python 3: set __bool__() method on Namespace
* Python 3: encode to UTF-8 when needed
* Python 3: sort tables by their full name
* Python 3: replace sys.maxint with sys.maxsize
* Initial commit for functional tests
* Update a test to properly anticipate HTTP 405 for RestController
* proposal to add Chris Dent to Ceilometer core
* rebuild event model only for database writes
* cleanup problem events logic in event db storage
* fix incorrect docstring for dispatcher
* Imported Translations from Transifex
* api: record severity change in alarm history
* VMware: verify vCenter server certificate
* Add hardware memory buffer and cache metrics
* Make interval optional in pipeline
* Improve ceilometer-api install documentation
* empty non-string values are returned as string traits
* Trait_* models have incorrect type for key
* small change to development.rst file
* Drop use of 'oslo' namespace package
* [unittests] Increase agent module unittests coverage
* stop mocking os.path in test_setup_events_default_config
* Remove py33 tox target
* made change to mod_wsgi.rst file
* ensure collections created on upgrade
* Fix raise error when run "tox -egenconfig"
* Updated from global requirements
* Fix None TypeError in neutron process notifications

2015.1.0
--------

* Have eventlet monkeypatch the time module
* Have eventlet monkeypatch the time module
* Add the function of deleting alarm history
* Updated from global requirements
* Fix valueerror when ceilometer-api start
* Override gnocchi_url configuration in test
* Move ceilometer/cli.py to ceilometer/cmd/sample.py
* Fix valueerror when ceilometer-api start
* remove deprecated partitioned alarm service
* use message id to generate hbase unique key
* gnocchi: fix typo in the aggregation endpoint
* Release Import of Translations from Transifex
* Fix Copyright date in docs
* Replace 'metrics' with 'meters' in option and doc
* use message id to generate hbase unique key
* update .gitreview for stable/kilo
* gnocchi: fix typo in the aggregation endpoint
* broadcast data to relevant queues only
* Imported Translations from Transifex
* fix combination alarm with operator == 'or'
* Updated from global requirements

2015.1.0rc1
-----------

* proposal to add ZhiQiang Fan to Ceilometer core
* Open Liberty development
* Fix a samples xfail test that now succeeds
* Cosmetic changes for system architecture docs
* Fix a issue for kafka-publisher and refactor the test code
* pymongo 3.0 breaks ci gate
* use oslo.messaging dispatch filter
* Further mock adjustments to deal with intermittent failure
* Adds support for default rule in ceilometer policy.json
* Updated from global requirements
* limit alarm actions
* Use oslo_vmware instead of deprecated oslo.vmware
* Remove 'samples:groupby' from the Capabilities list
* Use old name of 'hardware.ipmi.node.temperature'
* Revert "remove instance: meter"
* Tweak authenticate event definition
* Add project and domain ID to event definition for identity CRUD
* Fix the event type for trusts
* reset croniter to avoid cur time shift
* Imported Translations from Transifex
* Avoid a error when py27 and py-mysql tests run in sequence
* Stop using PYTHONHASHSEED=0 in ceilometer tests
* remove instance: meter
* Added ipv6 support for udp publisher
* Remove the unnecessary dependency to netaddr
* Optimize the flow of getting pollster resources
* support ability to skip message signing
* Avoid conflict with existing gnocchi_url conf value
* Using oslo.db retry decorator for sample create
* alarm: Use new gnocchi aggregation API
* collector: enable the service to listen on IPv6
* minimise the use of hmac
* Typo in pylintrc
* Ceilometer retrieve all images by 'all-tenants'
* fix incorrect key check in swift notifications
* support disabling profiler and http meters
* ensure collections created on upgrade
* Fix common misspellings
* Updated from global requirements
* refuse to post sample which is not supported
* Enable collector to requeue samples when enabled
* drop deprecated novaclient.v1_1
* exclude precise metaquery in query field

2015.1.0b3
----------

* Imported Translations from Transifex
* remove log message when process notification
* Add gabbi tests for resources
* Fix typos and format in docstrings in http dispatcher
* add ability to dispatch events to http target
* doc: fix class name
* add ability to publish to multiple topics
* make field and value attributes mandatory in API Query
* Fix db2 upgrade in multi-thread run issue
* Add memory.resident libvirt meter for Ceilometer
* Update reference
* Check the namespaces duplication for ceilometer-polling
* Add gabbi tests to explore the Meter and MetersControllers
* Imported Translations from Transifex
* mysql doesn't understand intersect
* order traits returned within events
* add network, kv-store, and http events
* Add support for additional identity events
* Add a Kafka publisher as a Ceilometer publisher
* Fix response POST /v2/meters/(meter_name) to 201 status
* Attempt to set user_id for identity events
* Switch to oslo.policy 0.3.0
* normalise timestamp in query
* Add more power and thermal data
* Updated from global requirements
* Fix formatting error in licence
* Added option to allow sample expiration more frequently
* add option to store raw notification
* use mongodb distinct
* remove event_types ordering assumption
* Add gabbi tests to cover the SamplesController
* api: fix alarm creation if time_constraint is null
* fix log message format in event.storage.impl_sqlalchemy
* Remove duplications from docco
* Tidy up clean-samples.yaml
* Fix a few typos in the docs
* use default trait type in event list query
* fix wrong string format in libvirt inspector
* create a developer section and refactor
* Do not default pecan_debug to CONF.debug
* Adding Gabbi Tests to Events API
* fix config opts in objectstore.rgw
* Updated from global requirements
* support time to live on event database for sql backend
* add an option to disable non-metric meters
* add missing objectstore entry points
* Initial gabbi testing for alarms
* reorganise architecture page
* Add ceph object storage meters
* Use oslo_config choices support
* fix inline multiple assignment
* alarming: add gnocchi alarm rules
* Protect agent startup from import errors in plugins
* Revert "Add ceph object storage meters"
* api: move alarm rules into they directory
* compress events notes
* Destroy fixture database after each gabbi TestSuite
* Fix unittests for supporting py-pgsql env
* Adding links API and CLI query examples
* correct column types in events
* Be explicit about using /tmp for temporary datafiles
* Patch for fixing hardware.memory.used metric
* Add ceph object storage meters
* [PostgreSQL] Fix regexp operator
* Add clean_exit for py-pgsql unit tests
* modify events sql schema to reduce empty columns
* Remove duplicated resource when pollster polling
* check metering_connection attribute by default
* unicode error in event converter
* cleanup measurements page
* api: add missing combination_rule field in sample
* Fix test case of self-disabled pollster
* update event architecture diagram
* use configured max_retries and retry_interval for database connection
* Updated from global requirements
* Making utilization the default spelling
* Add Disk Meters for ceilometer
* correctly leave group when process is stopped
* Updated from global requirements
* enable oslo namespace check for ceilometer project
* Add doc for version list API
* Enabling self-disabled pollster
* Use werkzeug to run the developement API server
* Imported Translations from Transifex
* switch to oslo_serialization
* move non-essential libs to test-requirements
* Validate default values in config
* fix the value of query_spec.maxSample to advoid to be zero
* clean up to use common service code
* Add more sql test scenarios
* [SQLalchemy] Add regex to complex queries
* Fix duplication in sinks names
* metering data ttl sql backend breaks resource metadata
* Refactor unit test code for disk pollsters
* start recording error notifications
* Remove no_resource hack for IPMI pollster
* Add local node resource for IPMI pollsters
* Use stevedore to load alarm rules api
* [MongoDB] Add regex to complex queries
* Imported Translations from Transifex
* support time to live on event database for MongoDB

2015.1.0b2
----------

* split api.controllers.v2
* add elasticsearch events db
* use debug value for pecan_debug default
* Shuffle agents to send request
* Updated from global requirements
* Adds disk iops metrics implementation in Hyper-V Inspector
* discovery: allow to discover all endpoints
* Declarative HTTP testing for the Ceilometer API
* add listener to pick up notification from ceilometermiddleware
* Drop deprecated namespace for oslo.rootwrap
* remove empty module tests.collector
* Add disk latency metrics implementation in Hyper-V Inspector
* add event listener to collector
* add notifier publisher for events
* enable event pipeline
* Imported Translations from Transifex
* deprecate swift middleware
* sync oslo and bring in versionutils
* Expose alarm severity in Alarm Model
* Hyper-V: Adds memory metrics implementation
* Remove mox from requirements
* Fix IPMI unit test to cover different platforms
* adjust import group order in db2 ut code
* add event pipeline
* remove unexistent module from doc/source/conf.py
* Upgrade to hacking 0.10
* Remove the Nova notifier
* Remove argparse from requirements
* [MongoDB] Improves get_meter_statistics method
* Fix docs repeating measuring units
* [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field
* remove pytidylib and netifaces from tox.ini external dependency
* Avoid unnecessary API dependency on tooz & ceilometerclient
* Correct name of "ipmi" options group
* Fix Opencontrail pollster according the API changes
* enable tests.storage.test_impl_mongodb
* Remove lockfile from requirements
* Disable eventlet monkey-patching of DNS
* Expose vm's metadata to metrics
* Adding build folders & sorting gitignore
* Disable proxy in unit test case of test_bin
* Add Event and Trait API to document
* Refactor ipmi agent manager
* Use alarm's evaluation periods in sufficient test
* Use oslo_config instead of deprecated oslo.config
* Avoid executing ipmitool in IPMI unit test
* Updated from global requirements
* Add a direct to database publisher
* Fixed MagnetoDB metrics title
* Imported Translations from Transifex
* Fix incorrect test case name in test_net.py
* Updated from global requirements
* notification agent missing CONF option
* switch to oslo_i18n
* Use right function to create extension list for agent test
* Imported Translations from Transifex
* Add an exchange for Zaqar in profiler notification plugin
* Remove unused pecan configuration options
* Updated from global requirements
* Use oslo_utils instead of deprecated oslo.utils
* Match the meter names for network services
* stop using private timeutils attribute
* Update measurement docs for network services
* Catch exception when evaluate single alarm
* Return a meaningful value or raise an excpetion for libvirt
* Imported Translations from Transifex
* make transformers optional in pipeline
* Added metering for magnetodb
* Add release notes URL for Juno
* Fix release notes URL for Icehouse
* remove unnecessary str method when log messages
* Revert "Remove Sphinx from py33 requirements"
* untie pipeline manager from samples
* reset listeners on agent refresh
* Remove inspect_instances method from virt
* Optimize resource list query
* Synchronize Python 3 requirements
* Remove unnecessary import_opt|group
* Add test data generator via oslo messaging
* Check to skip to poll and publish when no resource
* Add oslo.concurrency module to tox --env genconfig
* add glance events
* add cinder events
* Manual update from global requirements
* Add cmd.polling.CLI_OPTS to option list
* Ignore ceilometer.conf
* Switch to oslo.context library

2015.1.0b1
----------

* Revert "Skip to poll and publish when no resources found"
* Added missing measurements and corrected errors in doc
* Remove Sphinx from py33 requirements
* Clean up bin directory
* Improve tools/make_test_data.sh correctness
* ensure unique pipeline names
* implement notification coordination
* Make methods static where possible (except openstack.common)
* Fix docs to suit merged compute/central agents concept
* Drop anyjson
* Move central agent code to the polling agent module
* RBAC Support for Ceilometer API Implementation
* [SQLalchemy] Add groupby ability resource_metadata
* Improve links in config docs
* Make LBaaS total_connections cumulative
* remove useless looping in pipeline
* Encompassing one source pollsters with common context
* Modify tests to support ordering of wsme types
* Make compute discovery pollster-based, not agent-level
* Add docs about volume/snapshot measurements
* Port to graduated library oslo.i18n
* Retry to connect database when DB2 or mongodb is restarted
* Updated from global requirements
* Standardize timestamp fields of ceilometer API
* Workflow documentation is now in infra-manual
* Add alarm_name field to alarm notification
* Updated from global requirements
* Rely on VM UUID to fetch metrics in libvirt
* Imported Translations from Transifex
* Initializing a longer resource id in DB2 nosql backend
* Sync oslo-incubator code to latest
* ensure unique list of consumers created
* fix import oslo.concurrency issue
* Add some rally scenarios
* Do not print snmpd password in logs
* Miniscule typo in metering_connection help string
* add http dispatcher
* [MongoDB] Add groupby ability on resource_metadata
* [MongoDB] Fix bug with 'bad' chars in metadatas keys
* Override retry_interval in MongoAutoReconnectTest
* Exclude tools/lintstack.head.py for pep8 check
* Add encoding of rows and qualifiers in impl_hbase
* Database.max_retries only override on sqlalchemy side
* Support to capture network services notifications
* Internal error with period overflow
* Remove Python 2.6 classifier
* Enable pep8 on ./tools directory
* Imported Translations from Transifex
* Fixes Hyper-V Inspector disk metrics cache issue
* fix swift middleware parsing
* Fix order of arguments in assertEqual
* Updated from global requirements
* Adapting pylint runner to the new message format
* Validate AdvEnum & return an InvalidInput on error
* add sahara and heat events
* add keystone events to definitions
* Add timeout to all http requests
* [MongoDB] Refactor time to live feature
* transform samples only when transformers exist
* Updated from global requirements
* Remove module not really used by Ceilometer
* Switch to oslo.concurrency
* Skip to poll and publish when no resources found
* Change event type for identity trust notifications
* Add mysql and postgresql in tox for debug env
* Add new notifications types for volumes/snapshots
* Add encoding to keys in compute_signature
* Tests for system and network aggregate pollsters
* Add bandwidth to measurements
* Fix wrong example of capabilities
* Correct the mongodb_replica_set option's description
* Alarms listing based on "timestamp"
* Use 'pg_ctl' utility to start and stop database
* Correct alarm timestamp field in unittest code
* Refactor kwapi unit test
* Remove duplicated config doc
* VMware: Enable VMware inspector to support any port
* Clean event method difinition in meter storage base
* Fix some nits or typos found by chance
* Add Sample ReST API path in webapi document
* Enable filter alarms by their type
* Fix storage.hbase.util.prepare_key() for 32-bits system
* Add event storage for test_hbase_table_utils
* Add per device rate metrics for instances
* Fix hacking rule H305 imports not grouped correctly
* Add __repr__ method for sample.Sample
* remove ordereddict requirement
* Improve manual.rst file
* Imported Translations from Transifex
* Fix columns migrating for PostgreSQL
* Updated from global requirements
* Updated from global requirements
* [MongoDB] Fix bug with reconnection to new master node
* Updated from global requirements
* support request-id
* Update coverage job to references correct file
* remove reference to model in migration
* Use oslo_debug_helper and remove our own version
* Allow collector service database connection retry
* refresh ceilometer architecture documentation
* Edits assert methods
* Adds memory stats meter to libvirt inspector
* Edits assert methods
* Edits assert methods
* Edits assert methods
* Edits assert method
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* add script to generate test event data
* Handle poorly formed individual sensor readings
* refactor hbase storage code
* Avoid clobbering existing class definition
* Hoist duplicated AlarmService initialization to super
* Clarify deprecation comment to be accurate
* Work toward Python 3.4 support and testing
2014.2
------

* Fix recording failure for system pollster
* sync and clean up oslo
* Add missing notification options to the documentation
* Add missing alarm options to the documentation
* Add oslo.db to config generator
* Add missed control exchange options to the documentation
* Add coordination related options to the documentation
* Add missing collector options to the documentation
* switch to oslo-config-generator
* Edit docs for docs.opentack.org/developer/
* Add oslo.db to config generator
* Fix signature validation failure when using qpid message queue
* clean capabilities
* move db2 and mongo driver to event tree
* move sql event driver to event tree
* move hbase event driver to event tree
* Sets default encoding for PostgreSQL testing
* update database dispatcher to use events db
* Add role assignment notifications for identity
* add mailmap to avoid dup of authors
* Add user_metadata to network samples
* Fix recording failure for system pollster

2014.2.rc2
----------

* Manually updated translations
* Updated from global requirements
* Creates one database per sql test
* Adds pylint check for critical error in new patches
* Fix neutron client to catch 404 exceptions
* Fix OrderedDict usage for Python 2.6
* Include a 'node' key and value in ipmi metadata
* clean path in swift middleware
* Implement redesigned separator in names of columns in HBase
* [HBase] Add migration script for new row separate design
* Imported Translations from Transifex
* Include a 'node' key and value in ipmi metadata
* Updated from global requirements
* Run unit tests against PostgreSQL
* create skeleton files for event storage backends
* Imported Translations from Transifex
* isolate event storage models
* Fix neutron client to catch 404 exceptions
* Run unit tests against MySQL
* Updated from global requirements
* Correct JSON-based query examples in documentation
* Open Kilo development
* Add cfg.CONF.import_group for service_credentials
* Fix OrderedDict usage for Python 2.6
* clean path in swift middleware

2014.2.rc1
----------

* Partition static resources defined in pipeline.yaml
* Per-source separation of static resources & discovery
* dbsync: Acknowledge 'metering_connection' option
* Fix bug in the documentation
* Use oslo.msg retry API in rpc publisher
* Describe API versions
* Change compute agent recurring logs from INFO to DEBUG
* Fix bug with wrong bool opt value interpolation
* [HBase] Improves speed of unit tests on real HBase backend
* Imported Translations from Transifex
* Removed unused abc meta class
* update references to auth_token middleware
* clean up swift middleware to avoid unicode errors
* [HBase] Catch AlreadyExists error in Connection upgrade
* Use None instead of mutables in method params default values
* Updated from global requirements
* Enable to get service types from configuration file
* test db2 driver code
* Docs: Add description of pipeline discovery section
* Typo "possibilites" should be "possibilities"
* Modified docs to update DevStack's config filename
* Add an API configuration section to docs
* Tune up mod_wsgi settings in example configuration
* Allow pecan debug middleware to be turned off
* Provide __repr__ for SampleFilter
* Eliminate unnecessary search for test cases
* Switch to a custom NotImplementedError
* minimise ceilometer memory usage
* Partition swift pollster resources by tenant
* Add IPMI pollster
* Add IPMI support
* Stop using intersphinx
* Use central agent manager's keystone token in discoveries
* Handle invalid JSON filters from the input gracefully
* Sync jsonutils for namedtuple_as_object fix
* ceilometer spamming syslog
* Timestamp bounds need not be tight (per ceilometer 1288372)
* Allow to pass dict from resource discovery
* fix network discovery meters
* switch to sqlalchemy core
* Imported Translations from Transifex
* Improve the timestamp validation of ceilometer API
* Update docs with Sahara notifications configuration
* Migrate the rest of the central agent pollsters to use discoveries
* Add documentation for implemented identity meters
* Fix tests with testtools>=0.9.39
* Document the standard for PaaS service notifications
* Returns 401 when unauthorized project access occurs
* Adding another set of hardware metrics
* normalise resource data

2014.2.b3
---------

* warn against sorting requirements
* Add validate alarm_actions schema in alarm API
* Fix help strings
* Imported Translations from Transifex
* Switch partitioned alarm evaluation to a hash-based approach
* Central agent work-load partitioning
* collector: Allows to requeue a sample
* Typo fixed
* Switch to oslo.serialization
* Document pipeline publishers configuration
* Alarm: Use stevedore to load the service class
* Enhance compute diskio tests to handle multi instance
* Adding comparison operators in query for event traits
* XenAPI support: Update measurements documentation
* update requirements
* add documentation for setting up api pipeline
* Permit usage of notifications for metering
* XenAPI support: Disk rates
* XenAPI support: Changes for networking metrics
* XenAPI support: Memory Usage
* XenAPI support: Changes for cpu_util
* XenAPI support: List the instances
* Rebase hardware pollsters to use new inspector interface
* Switch to use oslo.db
* Remove oslo middleware
* Adding quotas on alarms
* Add an exchange for Trove in profiler notification plugin
* Simplify chained comparisons
* In-code comments should start with `#`, not with `"""`
* Remove redundant parentheses
* skip polls if service is not registered
* re-add hashseed to avoid gate error
* Switch to oslo.utils
* Switch to oslotest
* Handle sqlalchemy connection strings with drivers
* Rewrite list creation as a list literal
* Rewrite dictionary creation as a dictionary literal
* Triple double-quoted strings should be used for docstrings
* Add upgrading alarm storage in dbsync
* Improving of configuration.rst
* Fix typos in transformer docstrings
* Update tox.ini pep8 config to ignore i18n functions
* Added new hardware inspector interface
* compute: fix wrong test assertion
* sync olso-incubator code
* VMware: Support secret host_password option
* refactor filter code in sql backend
* Support for per disk volume measurements
* Use a FakeRequest object to test middleware
* Imported Translations from Transifex
* Improve api_paste_config file searching
* [Hbase] Add column for source filter in _get_meter_samples
* Issue one SQL statement per execute() call
* Allow tests to run outside tox
* [HBase] Refactor hbase.utils
* Set page size when Glance API request is called
* Adding init into tools folder
* Enhancing the make_test_data script
* correct DB2 installation supported features documentation
* Avoid duplication of discovery for multi-sink sources
* Improve performance of libvirt inspector requests
* Documented Stevedore usage and source details
* Add notifications for identity authenticate events
* Add message translate module in vmware inspector
* Handle Cinder attach and detach notifications
* [HBase] Improve uniqueness for row in meter table
* Doc enhancement for API service deployment with mod_wsgi
* Update documentation for new transformer
* Add the arithmetic transformer endpoint to setup.cfg
* Imported Translations from Transifex
* Fix unit for vpn connection metric
* Debug env for tox
* Change spelling mistakes
* Use auth_token from keystonemiddleware
* Fix dict and set order related issues in tests
* Fix listener for update.start notifications
* Sahara integration with Ceilometer
* Add notifications for identity CRUD events
* Extracting make_resource_metadata method
* Fix make_test_data tools script
* Add cumulative and gauge to aggregator transformer
* Enable some tests against py33
* Remove --tmpdir from mktemp
* Replace dict.iteritems() with six.iteritems(dict)
* Replace iterator.next() with next(iterator)
* Fix aggregator flush method
* Automatic discovery of TripleO Overcloud hardware
* Set python hash seed to 0 in tox.ini
* Don't override the original notification message
* Remove ConnectionProxy temporary class
* Move sqlalchemy alarms driver code to alarm tree
* basestring replaced with six.string_types
* Correct misspelled words

2014.2.b2
---------

* Add retry function for alarm REST notifier
* Move hbase alarms driver code to alarm tree
* Update measurement docs for FWaaS
* Update measurement docs for VPNaaS
* Follow up fixes to network services pollsters
* Updated from global requirements
* Implement consuming ipmi notifications from Ironic
* Support for metering FWaaS
* Adds Content-Type to alarm REST notifier
* Multi meter arithmetic transformer
* Remove redudent space in doc string
* Use None instead of mutables in test method params defaults
* Add support for metering VPNaaS
* Use resource discovery for Network Services
* Change of get_events and get_traits method in MongoDB and Hbase
* Fix two out-dated links in doc
* Move log alarms driver code to alarm tree
* Separate the console scripts
* clean up event model
* improve expirer performance for sql backend
* Move mongodb/db2 alarms driver code to alarm tree
* Allow to have different DB for alarm and metering
* Replace datetime of time_constraints by aware object
* Sync oslo log module and its dependencies
* Use hmac.compare_digest to compare signature
* Add testcase for multiple discovery-driven sources
* Fixes aggregator transformer timestamp and user input handling
* Improves pipeline transformer documentation
* Fix incorrect use of timestamp in test
* Add keystone control exchange
* Fix call to meter-list in measurements doc
* Remove redundant parentheses
* [Mongodb] Implement events on Mongodb and DB2
* Fix typos in code comments & docstrings
* Make the error message of alarm-not-found clear
* Fix SQL exception getting statitics with metaquery
* Remove docutils pin
* update default_log_levels set by ceilometer
* Fix annoying typo in partition coordinator test
* Transform sample_cnt type to int
* Remove useless sources.json
* Fix H405 violations and re-enable gating
* Fix H904 violations and re-enable gating
* Fix H307 violations and re-enable gating
* Fix the section name in CONTRIBUTING.rst
* Added osprofiler notifications plugin
* Improve a bit performance of Ceilometer
* Revert "Align to openstack python package index mirror"
* Fix aggregator _get_unique_key method
* Remove meter hardware.network.bandwidth.bytes
* Fix F402 violations and re-enable gating
* Fix E265 violations and re-enable gating
* Fix E251 violations and re-enable gating
* Fix E128 violations and re-enable gating
* Fix E126,H104 violations and re-enable gating
* Bump hacking to 0.9.x
* Fixed various import issues exposed by unittest
* use urlparse from six
* clean up sample index
* Fix HBase available capabilities list
* Updated from global requirements
* VMware:Update the ceilometer doc with VMware opts
* Handle non-ascii character in meter name
* Add log output of "x-openstack-request-id" from nova
* Imported Translations from Transifex
* fix StringIO errors in unit test
* Fix hacking rule 302 and enable it
* Imported Translations from Transifex
* sync oslo code
* Fixes ceilometer-compute service start failure
* Reenables the testr per test timeout
* Avoid reading real config files in unit test
* Clean up oslo.middleware.{audit,notifier}
* Use hacking from test-requirements
* Splits hbase storage code base
* Splits mongo storage code base
* Separate alarm storage models from other models
* Iterates swift response earlier to get the correct status
* Fix messaging.get_transport caching
* Fix method mocked in a test
* Don't keep a single global TRANSPORT object
* Clean up .gitignore
* Fix Sphinx directive name in session.py
* Fix list of modules not included in auto-gen docs
* Downgrade publisher logging to debug level again

2014.2.b1
---------

* remove default=None for config options
* [HBase] get_resource optimization
* Fix incorrect trait initialization
* Remove unused logging in tests
* Revert "Fix the floatingip pollster"
* Remove low-value logging from publication codepath
* Fix LBaaS connection meter docs
* Fix the meter type for LB Bytes
* Adding alarm list filtering by state and meter
* Adds caches for image and flavor in compute agent
* [HBase] Implement events on HBase
* Skipping central agent pollster when keystone not available
* Respect $TMPDIR environment variable to run tests
* Fixed unit test TestRealNotification
* Update Measurement Docs for LBaaS
* Metering LoadBalancer as a Service
* Removes per test testr timeout
* Change pipeline_manager to instance attribute in hooks
* Change using of limit argument in get_sample
* Refactor tests to remove direct access to test DBManagers
* Fix notification for NotImplemented record_events
* Add missing explicit cfg option import
* Fix ceilometer.alarm.notifier.trust import
* Use TYPE_GAUGE rather than TYPE_CUMULATIVE
* Update doc for sample config file issue
* Corrects a flaw in the treatment of swift endpoints
* use LOG instead of logger as name for the Logger object
* Fix doc gate job false success
* Improve performance of api requests with hbase scan
* Add new 'storage': {'production_ready': True} capability
* Clean tox.ini
* Remove (c) and remove unnecessary encoding lines
* Fix testing gate due to new keystoneclient release
* Ignore the generated file ceilometer.conf.sample
* Update the copyright date in doc
* Updated from global requirements
* reconnect to mongodb on connection failure
* refactor sql backend to improve write speed
* Don't rely on oslomsg configuration options
* replaced unicode() with six.text_type()
* Synced jsonutils from oslo-incubator
* Fix the floatingip pollster
* Fix project authorization check
* Update testrepository configuration
* Implemented metering for Cinder's snapshots
* Use joins instead of subqueries for metadata filtering
* Use None instead of mutables in method params defaults
* Remove all mostly untranslated PO files
* switch SplitResult to use six
* Remove unused db code due to api v1 drop
* Updated from global requirements
* oslo.messaging context must be a dict
* Drop deprecated api v1
* Fix network notifications of neutron bulk creation
* mongo: remove _id in inserted alarm changes
* Clean up openstack-common.conf
* Revert "oslo.messaging context must be a dict"
* Correct class when stopping partitioned alarm eval svc
* oslo.messaging context must be a dict
* Corrections of spelling, rephrasing for clarity
* Adapt failing tests for latest wsme version
* Removed StorageEngine class and it's hierarchy
* Correcting formatting and adding period in measurement doc
* Initialize dispatcher manager in event endpoint
* Replaced CONF object with url in storage engine creation
* Synced jsonutils from oslo-incubator
* Remove gettextutils._ imports where they are not used
* Remove "# noqa" leftovers for gettextutils._
* transformer: Add aggregator transformer
* Remove conversion debug message
* Fix the return of statistic with getting no sample
* Remove eventlet.sleep(0) in collector tests
* Don't allow queries with 'IN' predicate with an empty sequence
* Check if samples returned by get_sample_data are not None
* Opencontrail network statistics driver
* Add a alarm notification using trusts
* Replace hard coded WSGI application creation
* Describe storage backends in the collector installation guide
* Made get_capabilities a classmethod instead of object method
* Disable reverse dns lookup
* Consume notif. from multiple message bus
* Use NotificationPlugin as an oslo.msg endpoint
* Improve combination rule validation
* Remove ceilometer.conf.sample
* Use known protocol scheme in keystone tests
* cleanup virt pollster code
* Add encoding argument to deserialising udp packets in collector
* Made get_engine method module-private
* Make entities (Resource, User, Project) able to store lists
* Remove duplicate alarm from alarm_ids
* More accurate meter name and unit for host load averages
* Replace oslo.rpc by oslo.messaging
* Fix a response header bug in the error middleware
* Remove unnecessary escape character in string format
* Optimize checks to set image properties in metadata
* fix statistics query in postgres
* Removed useless code from __init__ method
* Refactored fake connection URL classes
* Replace assert statements with assert methods
* Removes direct access of timeutils.override_time
* Disable specifying alarm itself in combination rule
* Include instance state in metadata
* Allowed nested resource metadata in POST'd samples
* Sync oslo-incubator code
* Updated from global requirements
* Refactor the DB implementation of Capabilities API
* Fix Jenkins translation jobs
* Align to openstack python package index mirror
* User a more accurate max_delay for reconnects
* Open Juno development

2014.1.rc1
----------

* Imported Translations from Transifex
* Add note on aggregate duplication to API docco
* Use ConectionPool instead of one Connection in HBase
* remove dump tables from previous migrations
* De-dupe selectable aggregate list in statistics API
* ensure dispatcher service is configured before rpc
* improve performance of resource-list in sql
* SSL errors thrown with Postgres on multi workers
* Remove escape character in string format
* Verify user/project ID for alarm created by non-admin user
* enable a single worker by default
* Fix ceilometer.conf.sample mismatch
* Metadata in compute.instance.exists fix
* Fix order of arguments in assertEquals
* Documenting hypervisor support for nova meters
* Ensure idempotency of cardinality reduction in mongo
* VMware vSphere: Improve the accuracy of queried samples
* Use swob instead of webob in swift unit tests
* Disable oslo.messaging debug logs
* Fix validation error for invalid field name in simple query
* fix create_or_update logic to avoid rollbacks
* Avoid swallowing AssertionError in test skipping logic
* Fix hardware pollster to inspect multiple resources
* spawn multiple workers in services
* Install global lazy _()
* Fixes Hyper-V metrics units
* Ensure intended indices on project_id are created for mongo
* Fix the type of the disk IO rate measurements
* Change the sample_type from tuple to string
* Fix order of arguments in assertEquals
* Ensure alarm rule conform to alarm type
* insecure flag added to novaclient
* Fixes duplicated names in alarm time constraints
* Use the list when get information from libvirt
* Eventlet monkeypatch must be done before anything
* 028 migration script incorrectly skips over section
* Fix bug in get_capabilities behavior in DB drivers
* Added documentation for selectable aggregates
* Make sure use IPv6 sockets for ceilometer in IPv6 environment
* VMware vSphere: Bug fixes
* Ensure insecure config option propagated by alarm evaluator
* Fix order of arguments in assertEquals
* Fix order of arguments in assertEquals
* Fix order of arguments in assertEquals
* Rationalize get_resources for mongodb
* Ensure insecure config option propagated by alarm service
* add host meters to doc
* Add field translation to complex query from OldSample to Sample
* Extend test case to cover old alarm style conversion
* Updated doc with debug instructions
* Refactored the way how testscenarios tests are run
* Corrected the sample names in hardware pollsters
* Prevent alarm_id in query field of getting history
* Make ceilometer work with sqla 0.9.x
* Implements monitoring-network-from-opendaylight
* Add user-supplied arguments in log_handler
* VMware vSphere support: Disk rates
* Fix updating alarm can specify existing alarm name
* Changes for networking metrics support for vSphere
* VMware vSphere: Changes for cpu_util
* VMware vSphere support: Memory Usage
* Fix broken statistics in sqlalchemy
* Fixes Hyper-V Inspector network metrics values
* Set storage engine for the trait_type table
* Enable monkeypatch for select module
* Rename id to alarm_id of Alarm in SqlAlchemy
* Fix some spelling mistakes and a incorrect url
* Skip central agent interval_task when keystone fails

2014.1.b3
---------

* Ensure user metadata mapped for instance notifications
* Per pipeline pluggable resource discovery
* Wider selection of aggregates for sqlalchemy
* Wider selection of aggregates for mongodb
* Adds time constraints to alarms
* Remove code duplication Part 3
* Decouple source and sink configuration for pipelines
* Selectable aggregate support in mongodb
* Selectable aggregation functions for statistics
* Add simple capabilities API
* Removed global state modification by api test
* VMware vSphere support: Performance Mgr APIs
* Fix typo
* move databases to test requirements
* Make recording and scanning data more determined
* Implements "not" operator for complex query
* Implements metadata query for complex query feature
* Alarms support in HBase Part 2
* Alarm support in HBase Part 1
* Remove unused variable
* Added hardware pollsters for the central agent
* Added hardware agent's inspector and snmp implementation
* Updated from global requirements
* Pluggable resource discovery for agents
* Remove code duplication Part 2
* Imported Translations from Transifex
* remove audit logging on flush
* Tolerate absent recorded_at on older mongo/db2 samples
* api: export recorded_at in returned samples
* Fix the way how metadata is stored in HBase
* Set default log level of iso8601 to WARN
* Sync latest config file generator from oslo-incubator
* Fix typo on testing doc page
* Remove code duplication
* sample table contains redundant/duplicate data
* rename meter table to sample
* storage: store recording timestamp
* Fixed spelling error in Ceilometer
* Adds doc string to query validate functions in V2 API
* Updated from global requirements
* Remove code that works around a (now-resolved) bug in pecan
* Fix missing source field content on /v2/samples API
* Refactor timestamp existence validation in V2 API
* Use the module units to refer bytes type
* sync units.py from oslo to ceilometer
* Add comments for _build_paginate_query
* Implements monitoring-network
* Handle Heat notifications for stack CRUD
* Alembic migrations not tested
* Modify the discription of combination alarm
* check domain state before inspecting nics/disks
* Adds gettextutils module in converter
* Keep py3.X compatibility for urllib.urlencode
* Added missing import
* Removed useless prints that pollute tests log
* Implements in operator for complex query functionality
* Implements field validation for complex query functionality
* allow hacking to set dependencies
* Implements complex query functionality for alarm history
* Implements complex query functionality for alarms
* Remove None for dict.get()
* Replace assertEqual(None, *) with assertIsNone in tests
* Update notification_driver
* Switch over to oslosphinx
* Fix some flaws in ceilometer docstrings
* Rename Openstack to OpenStack
* Remove start index 0 in range()
* Updated from global requirements
* Remove blank line in docstring
* Use six.moves.urllib.parse instead of urlparse
* Propogate cacert and insecure flags to glanceclient
* Test case for creating an alarm without auth headers
* Refactored run-tests script
* Implements complex query functionality for samples
* fix column name and alignment
* Remove tox locale overrides
* Updated from global requirements
* Adds flavor_id in the nova_notifier
* Improve help strings
* service: re-enable eventlet just for sockets
* Fixes invalid key in Neutron notifications
* Replace BoundedInt with WSME's IntegerType
* Replace 'Ceilometer' by 'Telemetry' in the generated doc
* Doc: Add OldSample to v2.rst
* Fixing some simple documentation typos
* Updated from global requirements
* Fix for a simple typo
* Replace 'a alarm' by 'an alarm'
* Move ceilometer-send-counter to a console script
* sync oslo common code
* Handle engine creation inside of Connection object
* Adds additional details to alarm notifications
* Fix formating of compute-nova measurements table
* Fix string-to-boolean casting in queries
* nova notifier: disable tests + update sample conf
* Update oslo
* Refactored session access
* Fix the py27 failure because of "ephemeral_key_uuid" error
* Correct a misuse of RestController in the Event API
* Fix docs on what an instance meter represents
* Fix measurement docs to correctly represent Existance meters
* samples: fix test case status code check
* Replace non-ascii symbols in docs
* Use swift master
* Add table prefix for unit tests with hbase
* Add documentation for pipeline configuration
* Remove unnecessary code from alarm test
* Updated from global requirements
* Use stevedore's make_test_instance
* use common code for migrations
* Use explicit http error code for api v2
* Clean .gitignore
* Remove unused db engine variable in api
* Revert "Ensure we are not exhausting the sqlalchemy pool"
* eventlet: stop monkey patching
* Update dev docs to include notification-agent
* Change meter_id to meter_name in generated docs
* Correct spelling of logger for dispatcher.file
* Fix some typos in architecture doc
* Drop foreign key contraints of alarm in sqlalchemy
* Re-enable lazy translation
* Sync gettextutils from Oslo
* Fix wrong doc string for meter type
* Fix recursive_keypairs output
* Added abc.ABCMeta metaclass for abstract classes
* Removes use of timeutils.set_time_override

2014.1.b2
---------

* tests: kill all started processes on exit
* Exclude weak datapoints from alarm threshold evaluation
* Move enable_acl and debug config to ceilometer.conf
* Fix the Alarm documentation of Web API V2
* StringIO compatibility for python3
* Set the SQL Float precision
* Convert alarm timestamp to PrecisionTimestamp
* use six.move.xrange replace xrange
* Exit expirer earlier if db-ttl is disabled
* Added resources support in pollster's interface
* Improve consistency of help strings
* assertTrue(isinstance) replace by assertIsInstance
* Return trait type from Event api
* Add new rate-based disk and network pipelines
* Name and unit mapping for rate_of_change transformer
* Update oslo
* Remove dependencies on pep8, pyflakes and flake8
* Implement the /v2/samples/ API
* Fix to handle null threshold_rule values
* Use DEFAULT section for dispatcher in doc
* Insertion in HBase should be fixed
* Trivial typo
* Update ceilometer.conf.sample
* Fix use the fact that empty sequences are false
* Remove unused imports
* Replace mongo aggregation with plain ol' map-reduce
* Remove redundant meter (name,type,unit) tuples from Resource model
* Fix work of udp publisher
* tests: pass /dev/null as config for mongod
* requirements: drop netaddr
* tests: allow to skip if no database URL
* Fix to tackle instances without an image assigned
* Check for pep8 E226 and E24
* Fixed spelling mistake
* AlarmChange definition added to doc/source/webapi/v2.rst
* 1st & last sample timestamps in Resource representation
* Avoid false negatives on message signature comparison
* cacert is not picked up correctly by alarm services
* Change endpoint_type parameter
* Utilizes assertIsNone and assertIsNotNone
* Add missing gettextutils import to ceilometer.storage.base
* Remove redundant code in nova_client.Client
* Allow customized reseller_prefix in Ceilometer middleware for Swift
* Fix broken i18n support
* Empty files should no longer contain copyright
* Add Event API
* Ensure we are not exhausting the sqlalchemy pool
* Add new meters for swift
* Sync config generator workaround from oslo
* storage: factorize not implemented methods
* Don't assume alarms are returned in insert order
* Correct env variable in file oslo.config.generator.rc
* Handle the metrics sent by nova notifier
* Add a wadl target to the documentation
* Sync config generator from oslo-incubator
* Convert event timestamp to PrecisionTimestamp
* Add metadata query validation limitation
* Ensure the correct error message is displayed
* Imported Translations from Transifex
* Move sphinxcontrib-httpdomain to test-requirements
* Ensure that the user/project exist on alarm update
* api: raise ClientSideError rather than ValueError
* Implement the /v2/sample API
* service: fix service alive checking
* Oslo sync to recover from db2 server disconnects
* Event Storage Layer
* config: specify a template for mktemp
* test code should be excluded from test coverage summary
* doc: remove note about Nova plugin framework
* doc: fix formatting of alarm action types
* Updated from global requirements
* Add configuration-driven conversion to Events
* add newly added constraints to expire clear_expired_metering_data
* fix unit
* Add import for publisher_rpc option
* add more test cases to improve the test code coverage #5
* Create a shared queue for QPID topic consumers
* Properly reconnect subscribing clients when QPID broker restarts
* Don't need session.flush in context managed by session
* sql migration error in 020_add_metadata_tables

2014.1.b1
---------

* Remove rpc service from agent manager
* Imported Translations from Transifex
* organise requirements files
* Add a Trait Type model and db table
* No module named MySQLdb bug
* Add a note about permissions to ceilometer logging directory
* sync with oslo-incubator
* Rename OpenStack Metering to OpenStack Telemetry
* update docs to adjust for naming change
* Add i18n warpping for all LOG messages
* Imported Translations from Transifex
* Removed unused method in compute agent manger
* connection is not close in migration script
* Fixed a bug in sql migration script 020
* Fixed nova notifier test
* Added resources definition in the pipeline
* Change metadata_int's value field to type bigint
* Avoid intermittent integrity error on alarm creation
* Simplify the dispatcher method prototype
* Use map_method from stevedore 0.12
* Remove the collector submodule
* Move dispatcher a level up
* Split collector
* Add a specialized Event Type model and db table
* Remove old sqlalchemy-migrate workaround
* Revert "Support building wheels (PEP-427)"
* full pep8 compliance (part 2)
* Selectively import RPC backend retry config
* Fixes Hyper-V Inspector disk metrics bug
* Imported Translations from Transifex
* full pep8 compliance (part1)
* Replace mox with mock in alarm,central,image tests
* Stop ignoring H506 errors
* Update hacking for real
* Replace mox with mock in tests.collector
* Replace mox with mock in publisher and pipeline
* Replace mox with mock in novaclient and compute
* Remove useless defined Exception in tests
* Support building wheels (PEP-427)
* Fixes Hyper-V Inspector cpu metrics bug
* Replace mox with mock in tests.storage
* Document user-defined metadata for swift samples
* Replace mox with mock in energy and objectstore
* Updated from global requirements
* Replace mox with mock in tests.api.v2
* Refactor API error handling
* make record_metering_data concurrency safe
* Move tests into ceilometer module
* Replace mox with mock in tests.api.v1
* Replace mox with mock in tests.api.v2.test_compute
* Corrected import order
* Use better predicates from testtools instead of plain assert
* Stop using openstack.common.exception
* Replace mox with mock in tests.network
* Replace mox with mocks in test_inspector
* Fix failing nova_tests tests
* Replace mox with mocks in tests.compute.pollsters
* Add an insecure option for Keystone client
* Sync log from oslo
* Cleanup tests.publisher tests
* mongodb, db2: do not print full URL in logs
* Use wsme ClientSideError to handle unicode string
* Use consistant cache key for swift pollster
* Fix the developer documentation of the alarm API
* Fix the default rpc policy value
* Allow Events without traits to be returned
* Replace tests.base part8
* Replace tests.base part7
* Replace tests.base part6
* Imported Translations from Transifex
* Imported Translations from Transifex
* Sync log_handler from Oslo
* Don't use sqlachemy Metadata as global var
* enable sql metadata query
* Replace tests.base part5
* Replace tests.base part4
* Imported Translations from Transifex
* Updated from global requirements
* Fix doc typo in volume meter description
* Updated from global requirements
* Add source to Resource API object
* compute: virt: Fix Instance creation
* Fix for get_resources with postgresql
* Updated from global requirements
* Add tests when admin set alarm owner to its own
* Replace tests.base part3
* Replace tests.base part2
* Replace tests.base part1
* Fix wrong using of Metadata in 15,16 migrations
* api: update for WSME 0.5b6 compliance
* Changes FakeMemcache to set token to expire on utcnow + 5 mins
* Change test case get_alarm_history_on_create
* Change alarm_history.detail to text type
* Add support for keystoneclient 0.4.0
* Ceilometer has no such project-list subcommand
* Avoid leaking admin-ness into combination alarms
* Updated from global requirements
* Avoid leaking admin-ness into threshold-oriented alarms
* Update Oslo
* Set python-six minimum version
* Ensure combination alarms can be evaluated
* Ensure combination alarm evaluator can be loaded
* Apply six for metaclass
* add more test cases to improve the test code coverage #6
* Update python-ceilometerclient lower bound to 1.0.6
* Imported Translations from Transifex
* add more test cases to improve the test code coverage #4

2013.2.rc1
----------

* db2 does not allow None as a key for user_id in user collection
* Start Icehouse development
* Imported Translations from Transifex
* Disable lazy translation
* Add notifications for alarm changes
* Updated from global requirements
* api: allow alarm creation for others project by admins
* assertEquals is deprecated, use assertEqual
* Imported Translations from Transifex
* update alarm service setup in dev doc
* Add bug number of some wsme issue
* api: remove useless comments
* issue an error log when cannot import libvirt
* add coverage config file to control module coverage report
* tests: fix rounding issue in timestamp comparison
* api: return 404 if a alarm is not found
* remove locals() for stringformat
* add more test cases to improve the test code coverage #3
* Remove extraneous vim configuration comments
* Return 401 when action is not authorized
* api: return 404 if a resource is not found
* keystone client changes in AuthProtocol made our test cases failing
* Don't load into alarms evaluators disabled alarms
* Remove MANIFEST.in
* Allow to get a disabled alarm
* Add example with return values in API v2 docs
* Avoid imposing alembic 6.0 requirement on all distros
* tests: fix places check for timestamp equality
* Don't publish samples if resource_id in missing
* Require oslo.config 1.2.0 final
* Don't send unuseful rpc alarm notification
* service: check that timestamps are almost equals
* Test the response body when deleting a alarm
* Change resource.resource_metadata to text type
* Adding region name to service credentials
* Fail tests early if mongod is not found
* add more test cases to improve the test code coverage #2
* add more test cases to improve the test code coverage #1
* Imported Translations from Transifex
* Replace OpenStack LLC with OpenStack Foundation
* Use built-in print() instead of print statement
* Simple alarm partitioning protocol based on AMQP fanout RPC
* Handle manually mandatory field
* Provide new API endpoint for alarm state
* Implement the combination evaluator
* Add alarm combination API
* Notify with string representation of alarm reason
* Convert BoundedInt value from json into int
* Fix for timestamp precision in SQLAlchemy
* Add source field to Meter model
* Refactor threshold evaluator
* Alarm API update
* Update requirements
* WSME 0.5b5 breaking unit tests
* Fix failed downgrade in migrations
* refactor db2 get_meter_statistics method to support mongodb and db2
* tests: import pipeline config
* Fix a tiny mistake in api doc
* collector-udp: use dispatcher rather than storage
* Imported Translations from Transifex
* Drop sitepackages=False from tox.ini
* Update sphinxcontrib-pecanwsme to 0.3
* Architecture enhancements
* Force MySQL to use InnoDB/utf8
* Update alembic requirement to 0.6.0 version
* Correctly output the sample content in the file publisher
* Pecan assuming meter names are extensions
* Handle inst not found exceptions in pollsters
* Catch exceptions from nova client in poll_and_publish
* doc: fix storage backend features status
* Add timestamp filtering cases in storage tests
* Imported Translations from Transifex
* Use global openstack requirements
* Add group by statistics examples in API v2 docs
* Add docstrings to some methods
* add tests for _query_to_kwargs func
* validate counter_type when posting samples
* Include auth_token middleware in sample config
* Update config generator
* run-tests: fix MongoDB start wait
* Imported Translations from Transifex
* Fix handling of bad paths in Swift middleware
* Drop the *.create.start notification for Neutron
* Make the Swift-related doc more explicit
* Fix to return latest resource metadata
* Update the high level architecture
* Alarm history storage implementation for sqlalchemy
* Improve libvirt vnic parsing with missing mac!
* Handle missing libvirt vnic targets!
* Make type guessing for query args more robust
* add MAINTAINERS file
* nova_notifier: fix tests
* Update openstack.common.policy from oslo-incubator
* Clean-ups related to alarm history patches
* Improved MongoClient pooling to avoid out of connections error
* Disable the pymongo pooling feature for tests
* Fix wrong migrations
* Fixed nova notifier unit test
* Add group by statistics in API v2
* Update to tox 1.6 and setup.py develop
* Add query support to alarm history API
* Reject duplicate events
* Fixes a bug in Kwapi pollster
* alarm api: rename counter_name to meter_name
* Fixes service startup issue on Windows
* Handle volume.resize.* notifications
* Network: process metering reports from Neutron
* Alarm history storage implementation for mongodb
* Fix migration with fkeys
* Fixes two typos in this measurements.rst
* Add a fake UUID to Meter on API level
* Append /usr/sbin:/sbin to the path for searching mongodb
* Plug alarm history logic into the API
* Added upper version boundry for six
* db2 distinct call results are different from mongodb call
* Sync rpc from oslo-incubator
* Imported Translations from Transifex
* Add pagination parameter to the database backends of storage
* Base Alarm history persistence model
* Fix empty metadata issue of instance
* alarm: generate alarm_id in API
* Import middleware from Oslo
* Imported Translations from Transifex
* Adds group by statistics for MongoDB driver
* Fix wrong UniqueConstraint name
* Adds else and TODO in statistics storage tests
* Imported Translations from Transifex
* Extra indexes cleanup
* API FunctionalTest class lacks doc strings
* install manual last few sections format needs to be fixed
* api: update v1 for Flask >= 0.10
* Use system locale when Accept-Language header is not provided
* Adds Hyper-V compute inspector
* missing resource in middleware notification
* Support for wildcard in pipeline
* Refactored storage tests to use testscenarios
* doc: replace GitHub by git.openstack.org
* api: allow usage of resource_metadata in query
* Remove useless doc/requirements
* Fixes non-string metadata query issue
non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test_impl_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when _ is imported * Remove use_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message_id is not allowed to be submitted via api * Api V2 post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext _ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test_post_alarm case in test_alarm_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing _ * Fix nova test_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message_id in POSTed samples * rpc: remove source argument from message conversion * Remove source as a publisher argument * Add repeat_actions to alarm * Rename get_counters to get_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova_client.py * 
objectstore: trivial cleanup in _Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl_mongodb to use full connection url * calling distinct on _id field against a collection is slow * Use configured endpoint_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify_on_state_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage_conn in CollectorService * Remove replace/preserve logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table_prefix as a global option * mongodb: do not set replica_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get_counter_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure 
plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled_notification_listeners option * Remove disabled_compute_pollsters option * Remove disabled_central_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * 
agent-central: use CONF.import_opt rather than import * Move os_* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex 2013.2.b1 --------- * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract_opts group extraction * Fix the sample name of 'resource_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most 
minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require_map_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get_event_interval * Remove gettext.install from ceilometer/__init__.py * Prepare for future i18n use of _() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get_volume_sum and get_volume_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl_test * api: run max_resource_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME_DELETE notification * Open havana development, bump to 2013.2 2013.1 ------ * Change the column counter_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max_volume tests on SQL backend too * api: run list_sources tests on SQL and Mongo backend * api: run list_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.*.bytes" counter * Updated the description of get_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default_log_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list_projects tests against SQL backend too * api: run list users test against SQL backend too 
* api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering_storage_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join_consumer_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os_* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools_git from setup_requires * Removed unused param for get_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute_manager unavailable * Rename run_tests.sh to wrap_nosetests.sh * Update openstack.common * Corrected get_raw_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor 
documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth_token middleware changes * policy: fix policy_file finding * Remove the _initialize_config_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth_keystone * Import only once in nova_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance_type information to NetPollster * Fix dbsync API change * Fix image_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql_engine option type * Remove nova.flags usage * api: add support for timestamp in _list_resources() * api: add timestamp interval support in _list_events() * tests: simplify api list_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom 
compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu_util meter recording CPU utilization % * Fix TypeError from old-style publish_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get_volume_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 0.1 --- * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make_test_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.*.* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant 
rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get_metadata_from_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record_metering_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}_disk_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime 
objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit ceilometer-6.1.5/test-requirements.txt0000664000567000056710000000247513072744706021277 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
# Hacking already pins down pep8, pyflakes and flake8 hacking<0.11,>=0.10.0 Babel!=2.3.0,!=2.3.1,!=2.3.2,!=2.3.3,>=1.3 # BSD contextlib2>=0.4.0 # PSF License coverage>=3.6 # Apache-2.0 elasticsearch<2.0,>=1.3.0 # Apache-2.0 fixtures<2.0,>=1.3.1 # Apache-2.0/BSD happybase!=0.7,>=0.5,<1.0.0;python_version=='2.7' # MIT mock>=1.2 # BSD PyMySQL>=0.6.2 # MIT License os-win>=0.2.3 # Apache-2.0 oslo.cache>=1.5.0 # Apache-2.0 # Docs Requirements oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 oslotest>=1.10.0 # Apache-2.0 oslo.vmware>=1.16.0,<2.17.0 # Apache-2.0 overtest>=0.10.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL pylint==1.4.5 # GNU GPL v2 pymongo!=3.1,>=3.0.2 # Apache-2.0 gnocchiclient>=2.1.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD sphinxcontrib-httpdomain # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT gabbi>=1.11.0 # Apache-2.0 requests-aws>=0.1.4 # BSD License (3 clause) tempest-lib>=0.14.0 # Apache-2.0 tempest>=14.0.0 # Apache-2.0 WebTest>=2.0 # MIT ceilometer-6.1.5/tox.ini0000664000567000056710000001035313072744706016343 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.6 skipsdist = True envlist = py27,py34,functional,py34-functional,pep8 [testenv] deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt install_command = pip install -U {opts} {packages} usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=ceilometer/tests/unit passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE commands = {toxinidir}/tools/pretty_tox.sh "{posargs}" oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf whitelist_externals = bash # TODO(ityaptin): With the separation of tests into unit and functional folders # we need to set the environment variable OS_TEST_PATH=./ceilometer/tests/functional # in the "py-" jobs [testenv:py-mongodb] setenv = OS_TEST_PATH=ceilometer/tests/functional/ commands = overtest mongodb {toxinidir}/tools/pretty_tox.sh "{posargs}" [testenv:py-mysql] setenv = OS_TEST_PATH=ceilometer/tests/functional/ commands = overtest mysql {toxinidir}/tools/pretty_tox.sh "{posargs}" [testenv:py-pgsql] setenv = OS_TEST_PATH=ceilometer/tests/functional/ commands = overtest postgresql {toxinidir}/tools/pretty_tox.sh "{posargs}" # Functional tests for Elasticsearch [testenv:py-elastic] setenv = OS_TEST_PATH=ceilometer/tests/functional/ commands = overtest elasticsearch {toxinidir}/tools/pretty_tox.sh "{posargs}" [testenv:functional] setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=ceilometer/tests/functional/ passenv = CEILOMETER_* commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}" [testenv:py34-functional] setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=ceilometer/tests/functional/ basepython = python3.4 passenv = CEILOMETER_* commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}" [testenv:integration] setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=./ceilometer/tests/integration OS_TEST_TIMEOUT=2400 GABBI_LIVE_FAIL_IF_NO_TEST=1 passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* GLANCE_* NOVA_* ADMIN_* # FIXME(sileht): run gabbi-run to fail fast in case of error because testr # doesn't support --failfast, but we lose the testr report.
commands = bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml' # bash -x {toxinidir}/tools/pretty_tox.sh "{posargs}" # NOTE(chdent): The gabbi tests are also run under the other functional # tox targets. This environment simply provides a way to run just the # gabbi tests directly, without doing discovery across the entire body # of tests. [testenv:gabbi] setenv = OS_TEST_PATH=ceilometer/tests/functional/gabbi passenv = CEILOMETER_* commands = overtest mongodb {toxinidir}/tools/pretty_tox.sh "{posargs}" [testenv:cover] setenv = OS_TEST_PATH=ceilometer/tests commands = python setup.py testr --slowest --coverage --testr-args="{posargs}" [testenv:pep8] commands = flake8 # Check that .po and .pot files are valid: bash -c "find ceilometer -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:releasenotes] commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf [testenv:docs] commands = python setup.py build_sphinx setenv = PYTHONHASHSEED=0 [testenv:pylint] commands = bash tools/lintstack.sh [testenv:venv] commands = {posargs} setenv = PYTHONHASHSEED=0 [testenv:debug] commands = bash -x oslo_debug_helper {posargs} [testenv:debug-mongodb] setenv = OS_TEST_PATH=ceilometer/tests/functional commands = overtest mongodb oslo_debug_helper {posargs} [testenv:debug-mysql] setenv = OS_TEST_PATH=ceilometer/tests/functional commands = overtest mysql oslo_debug_helper {posargs} [testenv:debug-pgsql] setenv = OS_TEST_PATH=ceilometer/tests/functional commands = overtest postgresql oslo_debug_helper {posargs} [testenv:debug-elastic] setenv = OS_TEST_PATH=ceilometer/tests/functional commands = overtest elasticsearch oslo_debug_helper {posargs} [flake8] ignore = exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build show-source = True [hacking] import_exceptions = ceilometer.i18n local-check-factory = ceilometer.hacking.checks.factory ceilometer-6.1.5/HACKING.rst0000664000567000056710000000206113072744703016620 0ustar jenkinsjenkins00000000000000Ceilometer Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Ceilometer Specific Commandments -------------------------------- - [C301] LOG.warn() is not allowed. Use LOG.warning() - [C302] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up.
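As a minimal sketch of these commandments (the class, helper, and test names here are hypothetical; only the testtools and fixtures usage is prescribed above), a unit test might look like::

    import fixtures
    import testtools


    class TestExample(testtools.TestCase):

        def setUp(self):
            # Always upcall so testtools can perform its own setup.
            super(TestExample, self).setUp()
            # Prefer addCleanup over defining a tearDown method.
            self.addCleanup(self._reset_state)
            # Never create tempfiles by hand; the fixture removes the
            # directory automatically when the test finishes.
            self.temp_dir = self.useFixture(fixtures.TempDir()).path

        def _reset_state(self):
            pass

        def test_tempdir_exists(self):
            self.assertTrue(self.temp_dir)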
ceilometer-6.1.5/babel.cfg0000664000567000056710000000002113072744703016542 0ustar jenkinsjenkins00000000000000[python: **.py] ceilometer-6.1.5/AUTHORS0000664000567000056710000002650013072745162016076 0ustar jenkinsjenkins00000000000000Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alex Holden Alexei Kornienko Ana Malagon Ananya Chatterjee Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova Artur Svechnikov Ashwin Agate Balazs Gibizer Bartosz Górski Ben Nemec Ben Nemec Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brian Moss Brooklyn Chen Béla Vancsics Can ZHANG Cedric Soulas Chad Lung Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Christian Berendt Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu Cyril Roelandt Cyril Roelandt Damian Van Vuuren Dan Florea Dan Prince Dan Travis Darren Birkett Davanum Srinivas David Peraza Dazhao Debo~ Dutta Dina Belova Dirk Mueller Divya Dong Ma Doug Hellmann Drew Thorstensen Edwin Zhai Emilien Macchi Emma Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Brown Fabio Giannetti Fei Long Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Gordon Chung Graham Binns Guangyu Suo Hang Liu Hanxi Liu Haomeng, Wang Harri Hämäläinen Hisashi Osanai Hongbin Lu Igor Degtiarov Ihar Hrachyshka Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Jake Liu James E. Blair Jason Myers Jason Zhang Jay Lau Jay Pipes Jeremy Stanley Jie Li Jim Rollenhagen Jimmy McCrory Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon JordanP JuPing Julien Danjou Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kennan Kennan Kevin McDonald Kirill Bespalov Kishore Juigil Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Laszlo Hegedus Lena Novokshonova Lianhao Lu LinuxJedi LiuSheng Luis A.
Garcia Luo Gangyi Maho Koshiya Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Mathew Odden Mathieu Gagné Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still Michał Jastrzębski Miguel Alex Cantu Miguel Grinberg Mike Spreitzer Monsyne Dragon Monty Taylor Morgan Fainberg Nadya Privalova Nadya Shakhat Nejc Saje Nick Barcet Nicolas Barcet (nijaba) Noorul Islam K M Octavian Ciuhandu PanFengyun Patrick East Paul Belanger Peter Portante Phil Neal Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Pádraig Brady Qiaowei Ren Rabi Mishra Rafael Rivero Rich Bowen Rikimaru Honjo Rob Raymond Robert Collins Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU Saba Ahmed Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sergey Lukjanov Sergey Vilgelm Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shubham Chitranshi Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stefano Zilli Stephen Balukoff Stephen Gran Steve Lewis Steve Martinelli Steven Berler Sumant Murke Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Takashi NATSUME Tatsuro Makita Terri Yu Thierry Carrez Thomas Bechtold Thomas Herve Thomas Herve Thomas Maddox Tong Li Ubuntu Victor Stinner Victor Stinner Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Wu Wenxiang Xia Linjuan XiaBing Yao Yaguang Tang Yanyan Hu Yassine Lamgarchal Yathiraj Udupi You Yamagata Yunhong, Jiang Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhongyue Luo Zi Lian Ji ananya23d annegentle ansaba ccrouch eNovance emilienm florent fujioka yuuichi gengjh ghanshyam gord chung guillaume pernot hanxi.liu hgangwx jiaxi jinxingfang jizilian joyce kairoaraujo kiwik-chenrui leizhang lianghuifei lijian liuqing liusheng lizheming lqslan lrqrun ls1175 lvdongbing lzhijun mizeng nellysmitt replay sanuptpm sh.huang shengjie min srsakhamuri tanlin terriyu unknown vagrant venkatamahesh vivek.nandavanam vivek.nandavanam xialinjuan xiangjun li xingzhou yanheven zhang-jinnan zhangguoqing zjingbj ceilometer-6.1.5/doc/0000775000567000056710000000000013072745164015572 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/doc/source/0000775000567000056710000000000013072745164017072 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/doc/source/index.rst0000664000567000056710000000340413072744706020735 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================== Welcome to the Ceilometer developer documentation! ================================================== The :term:`Ceilometer` project is a data collection service that provides the ability to normalise and transform data across all current OpenStack core components, with work underway to support future OpenStack components. Ceilometer is a component of the Telemetry project.
Its data can be used to provide customer billing, resource tracking, and alarming capabilities across all OpenStack core components. This documentation offers information on how Ceilometer works and how to contribute to the project. Overview ======== .. toctree:: :maxdepth: 2 overview architecture measurements events webapi/index Developer Documentation ======================= .. toctree:: :maxdepth: 2 install/index configuration plugins new_meters testing contributing gmr Appendix ======== .. toctree:: :maxdepth: 1 releasenotes/index glossary api/index .. update index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ceilometer-6.1.5/doc/source/conf.py0000664000567000056710000002275713072744706020407 0ustar jenkinsjenkins00000000000000# # Ceilometer documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # This is required for ReadTheDocs.org, but isn't a bad idea anyway. os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.autohttp.flask', 'wsmeext.sphinxext', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.pecanwsme.rest', 'oslosphinx', ] wsme_protocols = ['restjson', 'restxml'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer' copyright = u'2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "nosidebar": "false" } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'Ceilometerdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Ceilometer.tex', u'Ceilometer Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometer', u'Ceilometer Documentation', [u'OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Ceilometer', u'Ceilometer Documentation', u'OpenStack', 'Ceilometer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Ceilometer' epub_author = u'OpenStack' epub_publisher = u'OpenStack' epub_copyright = u'2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True ceilometer-6.1.5/doc/source/new_meters.rst0000664000567000056710000001053313072744706021777 0ustar jenkinsjenkins00000000000000.. 
      Copyright 2012 New Dream Network (DreamHost)

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. _add_new_meters:

==================
 New measurements
==================

Ceilometer is designed to collect measurements from OpenStack services and
from other external components. If you would like to add new meters to the
currently existing ones, you need to follow the guidelines given in this
section.

.. _meter_types:

Types
=====

Three types of meters are defined in Ceilometer:

.. index::
   double: meter; cumulative
   double: meter; gauge
   double: meter; delta

==========  ==============================================================================
Type        Definition
==========  ==============================================================================
Cumulative  Increasing over time (instance hours)
Gauge       Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O)
Delta       Changing over time (bandwidth)
==========  ==============================================================================

When you are about to add a new meter, choose whichever type from the list
above is applicable.

Units
=====

1. Whenever a volume is to be measured, SI approved units and their approved
   symbols or abbreviations should be used. Information units should be
   expressed in bits ('b') or bytes ('B').

2. For a given meter, the units should NEVER, EVER be changed.

3. When the measurement does not represent a volume, the unit description
   should always describe WHAT is measured (ie: apples, disk, routers,
   floating IPs, etc.).

4. When creating a new meter, if another meter exists measuring something
   similar, the same units and precision should be used.

5. Meters and samples should always document their units in Ceilometer (API
   and Documentation) and new sampling code should not be merged without the
   appropriate documentation.

============  ========  ==============  =======================
Dimension     Unit      Abbreviations   Note
============  ========  ==============  =======================
None          N/A                       Dimension-less variable
Volume        byte      B
Time          seconds   s
============  ========  ==============  =======================

Meters
======

Naming convention
-----------------

If you plan on adding meters, please follow the convention below:

1. Always use '.' as separator and go from least to most discriminant word.
   For example, do not use ephemeral_disk_size but disk.ephemeral.size

2. When a part of the name is a variable, it should always be at the end and
   start with a ':'. For example, do not use <type>.image but image:<type>,
   where type is your variable name.

3. If you have any hesitation, come and ask in #openstack-ceilometer

Meter definitions
-----------------

Meter definitions are stored, by default, in a separate configuration file
called :file:`ceilometer/meter/data/meter.yaml`. This is essentially a
replacement for the prior approach of writing notification handlers to
consume specific topics.

A detailed description of how to use meter definitions is illustrated in the
`admin_guide`_.

.. _admin_guide: http://docs.openstack.org/admin-guide-cloud/telemetry-data-collection.html#meter-definitions
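
As a concrete illustration of the type and naming rules above, here is a
minimal, self-contained sketch. It stands in for the real ``Sample`` class
from ``ceilometer/sample.py`` with a plain named tuple, so the field set
shown is a simplification rather than the project's exact constructor::

    import collections

    # Illustrative stand-in for ceilometer.sample.Sample; the real class
    # carries more fields (user, project, resource, timestamp, ...).
    Sample = collections.namedtuple('Sample', 'name type unit volume')

    samples = [
        # gauge: a discrete, point-in-time value; note the '.'-separated
        # name going from least to most discriminant word
        Sample(name='disk.ephemeral.size', type='gauge', unit='GB',
               volume=20),
        # cumulative: keeps increasing while the resource exists
        Sample(name='cpu', type='cumulative', unit='ns', volume=4200000000),
        # delta: change since the previous sample; a variable name part
        # would go last, e.g. 'image:<type>'
        Sample(name='bandwidth', type='delta', unit='B', volume=1024),
    ]

    for s in samples:
        print('%-20s %-10s %12s %s' % (s.name, s.type, s.volume, s.unit))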
Non-metric meters and events
----------------------------

Ceilometer supports collecting notifications as events. It is highly
recommended to use events for capturing whether something happened in the
system or not, as opposed to defining meters whose volume will be constantly
'1'. Events enable better representation and querying of metadata, rather
than the statistical aggregations required for Samples.

When event support is turned on for Ceilometer, event type meters are also
collected into the event database, which can lead to the duplication of a
huge amount of data.

In order to learn more about events see the :ref:`events` section.
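
To make the meter-versus-event trade-off concrete, the following sketch
contrasts the two shapes; the trait layout is an assumption modelled loosely
on Ceilometer's event format, not a verbatim copy of its schema::

    import datetime
    import uuid

    # An always-'1' meter: the volume carries no information, so the
    # statistical machinery built around Samples is wasted on it.
    meter_shaped = {'counter_name': 'instance.create',
                    'counter_type': 'delta',
                    'counter_unit': 'instance',
                    'counter_volume': 1}

    # An event: the interesting metadata becomes queryable traits.
    event_shaped = {'event_type': 'compute.instance.create.end',
                    'message_id': str(uuid.uuid4()),
                    'generated': datetime.datetime.utcnow().isoformat(),
                    'traits': {'instance_id': 'hypothetical-uuid',
                               'state': 'active',
                               'memory_mb': 2048}}

    print(meter_shaped)
    print(event_shaped)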
ceilometer-6.1.5/doc/source/_templates/0000775000567000056710000000000013072745164021227 5ustar  jenkinsjenkins00000000000000ceilometer-6.1.5/doc/source/_templates/.placeholder0000664000567000056710000000000013072744703023476 0ustar  jenkinsjenkins00000000000000ceilometer-6.1.5/doc/source/6-storagemodel.png0000664000567000056710000014720113072744703022433 0ustar  jenkinsjenkins00000000000000
[binary PNG image data omitted: 6-storagemodel.png, the storage-model figure referenced by architecture.rst]
í×öû»fCÇ÷°šLŽƒN,îókÕæåã_+Vâš{îÁø´ÅŸ2áS¦`úÀP«Áw¯nÃùþýã?û@‚7mBìüTˆ øPFD`îc4.¸¶îý÷a5™0>m1Òžj5|…BÈBT˜t×OpóŸÿ11)<Øúƒ\ާŸ~+V¬À#<Â@‚ˆˆˆ͵ãü±~ÝϱtéR<ýôÓ¼Dƒ=Çzš¾½{WˆÔÒÒÙŒ2yyyزe‹ãàÈχ¿N?¤û³öûã=Z¯üøq|ñ»'`jhh÷XÂ’%H{þ¹vËuUÕøà0ç±G‘xË-®åÛfÎÂmÛ··›fßïŸêõv|…<|ô(,:¾|âIÜòÒKð¶¿«lÞîÝ8ðÇgû¼Ïo-OÇOÞØipp‡õ±™ÍØóØã¨8yÒµl|ZÒž®ÓôóJ^>~äX†ø€‚„xüý7ò_/""""bO3Œ{š¾ì¯2"bD÷40㡇0û‘‡»]OðÌ3Ï<3OÞÚÚZ=z¤Ñ@liÒý™õ‹_t¸Üj2CW]Ëß~‹#}Ç_ÝîJ¯VwáÌP«!’Ë¡¯¹‚¢¯¾Âþ§ÿ}M 4……ˆHI4 M8³kªÏžAHB¤AA°šÌÐâÛ_ìõv„)¦gdÀÔØˆoþügTž< EX8ÄþJø¨¿t '33ñݶW]Öú²Ï'þñ >ý -V$ŠDðññ®¦Åbÿ3Ä•~p«K}Q.ø~R $þþŠÅ°Y,Ð\¸€Óÿ|_oúS§5õ4*Íb1‚ƒƒ‘ššÊe‰ˆˆˆˆ=Í0îiú²¿nD÷4eÑQ¸hoAu}=&OžÜåº!á‘&!ADDDDìihôô4¼‡‘R«ÕX¿~=âòó!3Yò: $ˆˆˆˆˆˆ¼\.Grr2üuzív„¼ """""""ò8DDDDDDDäq $ˆˆˆˆˆˆˆÈãH‘Ç1 """""""²DDDDDDÞG¯×£¬¬ Z…Rƒ3m×á """"""/TZZŠ—^z “aIY¢Ëʱd||ŽdOCÃCQ\ŽÖ×!''§ÛuGm ‰D¨S©xÔÐ3H¥0Éå€iÓ¦± DDDDÔ¥äädWOc”IYr h Ĺââ­?ª/ÙX¼x1 )( “‡‡cÖÿÀ„ôtäää 11±Ï#½HtB¯×cÿþýØ¿¿ÛIìkµ"X£Ah͈-Š:¥ FJ½¿Òmy||<–/_Žääd‰ˆˆˆˆ<ÞÓø™ÍPiê¤Ñ°§¡.5ø»¬ˆiÓ´|& àMøHôð$>|ø0êëëÝ“èõÖÔ! ±‘'2¹NØÆÀ@4º†DÑÐö4W0öÂE66²HÔiOscp0®¿ÿ~„BÃ@¢²³³qøða·{L89à …N™ÑÈbV__蔊NC‰D‚¹sç"--—fѰèiöïßòòrˆE"¬ˆ‰Å¥¯¿†¾ºšÅÅ=Mc` «¯¹º§¹á†°zõêAym}P[[‹}ûö¡  åååí÷µZÐÐ…V…NÇÑ#ŒV¡€^©tü÷ªË1œ¦NŠ””¤¦¦²`DDDD4,{šüü|×çÕ+yyÈÛ½'N ®õ ؆€Øö4#T“BŽŠèh˜:¸ÿƒD"AJJ RRRút³J<‘sssqøðáà Àq–B«…Ô`ä /ãa’É» Úž°ý¹© Ñ7ªåå(?qÛ?ùÆÖvÑÙÓ(´:HFö4#€_J Ž |Ûõ4‰‰‰ûb•Är¦ŒÈÍÍmw}V[ò&-”:üÌfžÐÃ(|0ÊdÐ+•0‹ü`”J;L ¢¢¢˜˜ˆ””Þ‚ˆˆˆˆF½^'žx¢ÓžÆ×j…Ô`„R§Ã˜ÊJlö4©Áë!Ó¦!rÆ „$%"rÆ Hüýñì³Ïº¾X‰‰ñøþ2D%%%(((p…]ÎBd1Cli†\«…ÀfcP1ˆ'©E$B³X ­B‹Øñÿ»â Ôj5RRR8 ‚ˆˆˆˆF}O#iiÁ”ü|4ë ~öÚí,ä Ð*Ýö4S"#±jÕªA¹!%äòÎcIIDAT /ãAQZZŠÒÒÒoŽÙ‰^Í¥Nç.´ZpýLí™E"XD"ؾ0Éä0‹ü`‰a”IÛݤ¥#AAAP«ÕP«ÕHLLä""""¢Ö€ÂÙÏ@¥Raݺu055A“_€ò'P[‹V‡Êœ$ÄÃàïïÖÓøZ­ü¶P?¾Ûž&%%eÐnHÉ@b„œÐ¥¥¥ÈÏÏGYYY·#)®&orR£B›Ý-´i'¹stW©6 G#®µZ •JåÁÑDDDDDý÷«Ç‡Élîr_«SO55iÛÓ8¿@í¨§‰Ëχ¿N?¹ !‰IPFF@‰¤DÔ™ÌøûGº¶¯ëiH SyyyÐh4®°Â`0ôxDEO §ŽFY8ÃŒÎô4äh{¢u¦í èäÍàÔ—ájAAAP©TP«ÕÉdHLLDHH§â$""""DûöíƒÁ`@~~>ŒFc§<8m:¸FW@eNŽã¿cPé /¤c‡½‰CC§}JÛÞ¤?_ÚjŠ—ë•î7¿ïê¾mßOWÒ,ÀŠŸþ´ËžÑÛ{^V@AA ?? ÑhP__?*k©TêJÿcè@DDDD4¼èõz”––Âh4¢´´žžÞéúÿøÛßpüĉn·;ËfGˆ[˜áÔ¤£()©ÛmÈššXØùÁÎKPº³rÌ(#~ B’!V8B‹CgNãÀ±cÝö4£áÒq#TII ƒ[pÀ5Ú©/—ˆx*\prŽjà œËyiÑÈ–——çÖÓ8¿urŽ$_¿~}§ |^^¶lÙÒíkÅ„‡cÍrG8"R*ÚÝ ò…^èÑÈõM›6uúÅhÛ^m´Ê@‚Ú©­­Emmm·ë9/)éHbbb^‹7‹$""""¢Áæ•Ñ]/£R©ššÚévÚ† N•Ýw $ˆˆˆˆˆˆˆÈã|Y"""""""ò4DDDDDDDäq $ˆˆˆˆˆˆˆÈãH‘Ç1 """""""ûÿ¸*̾ZIEND®B`‚ceilometer-6.1.5/doc/source/architecture.rst0000664000567000056710000003010313072744706022304 0ustar jenkinsjenkins00000000000000.. _architecture: ===================== System Architecture ===================== .. index:: single: agent; architecture double: compute agent; architecture double: collector; architecture double: data store; architecture double: database; architecture double: API; architecture High-Level Architecture ======================= .. The source for the following diagram can be found at: https://docs.google.com/presentation/d/1XiOiaq9zI_DIpxY1tlkysg9VAEw2r8aYob0bjG71pNg/edit?usp=sharing .. figure:: ./ceilo-arch.png :width: 100% :align: center :alt: Architecture summary An overall summary of Ceilometer's logical architecture. Each of Ceilometer's services are designed to scale horizontally. Additional workers and nodes can be added depending on the expected load. Ceilometer offers three core services, the data agents designed to work independently from collection, but also designed to work together as a complete solution: 1. polling agent - daemon designed to poll OpenStack services and build Meters. 2. notification agent - daemon designed to listen to notifications on message queue, convert them to Events and Samples, and apply pipeline actions. 3. 
Notification Agents: Listening for data
---------------------------------------

.. index::
   double: notifications; architecture

.. figure:: ./2-1-collection-notification.png
   :width: 100%
   :align: center
   :alt: Notification agents

   Notification agents consuming messages from services.

The heart of the system is the notification daemon (agent-notification)
which monitors the message bus for data being provided by other OpenStack
components such as Nova, Glance, Cinder, Neutron, Swift, Keystone, and Heat,
as well as Ceilometer internal communication.

The notification daemon loads one or more *listener* plugins, using the
namespace ``ceilometer.notification``. Each plugin can listen to any topic,
but by default it will listen to ``notifications.info``. The listeners grab
messages off the defined topics and redistribute them to the appropriate
plugins (endpoints) to be processed into Events and Samples.

Sample-oriented plugins provide a method to list the event types they are
interested in and a callback for processing messages accordingly. The
registered name of the callback is used to enable or disable it using the
pipeline of the notification daemon. The incoming messages are filtered
based on their event type value before being passed to the callback, so the
plugin only receives events it has expressed an interest in seeing. For
example, a callback asking for ``compute.instance.create.end`` events under
``ceilometer.compute.notifications`` would be invoked for those notification
events on the ``nova`` exchange using the ``notifications.info`` topic.
Event matching can also work using wildcards e.g. ``compute.instance.*``.
Similarly, if enabled, notifications are converted into Events which can be
filtered based on the event_type declared by other services.
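
The shape of such a sample-oriented plugin can be sketched as follows. This
is a simplified, self-contained stand-in rather than the project's actual
base class (see ``ceilometer/agent/plugin_base.py`` for the real interface);
the wildcard filtering mirrors the event-type matching described above::

    import fnmatch


    class InstanceCreateEndpoint(object):
        """Hypothetical sample-oriented listener plugin."""

        # wildcard patterns matched against each notification's event_type
        event_types = ['compute.instance.create.*']

        def interested_in(self, event_type):
            return any(fnmatch.fnmatch(event_type, pattern)
                       for pattern in self.event_types)

        def process_notification(self, message):
            # turn one notification payload into sample-like dicts
            payload = message['payload']
            yield {'name': 'memory', 'type': 'gauge', 'unit': 'MB',
                   'volume': payload['memory_mb'],
                   'resource_id': payload['instance_id']}


    endpoint = InstanceCreateEndpoint()
    if endpoint.interested_in('compute.instance.create.end'):
        message = {'payload': {'instance_id': 'fake-uuid',
                               'memory_mb': 512}}
        for sample in endpoint.process_notification(message):
            print(sample)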
.. _polling:

Polling Agents: Asking for data
-------------------------------

.. index::
   double: polling; architecture

.. figure:: ./2-2-collection-poll.png
   :width: 100%
   :align: center
   :alt: Polling agents

   Polling agents querying services for data.

Polling for compute resources is handled by a polling agent running on the
compute node (where communication with the hypervisor is more efficient),
often referred to as the compute-agent. Polling via service APIs for
non-compute resources is handled by an agent running on a cloud controller
node, often referred to as the central-agent. A single agent can fulfill
both roles in an all-in-one deployment. Conversely, multiple instances of an
agent may be deployed, in which case the workload is shared.

The polling agent daemon is configured to run one or more *pollster* plugins
using either the ``ceilometer.poll.compute`` and/or
``ceilometer.poll.central`` namespaces. The agents periodically ask each
pollster for instances of ``Sample`` objects. The frequency of polling is
controlled via the pipeline configuration. See :ref:`Pipeline-Configuration`
for details. The agent framework then passes the samples to the pipeline for
processing.

Note that there is an optional config option called
``shuffle_time_before_polling_task`` in ceilometer.conf. Enable it by
setting an integer greater than zero: the agents then stagger the start of
their polling tasks, adding random jitter to the time at which requests are
sent to nova or other components, so as to avoid a large number of requests
in a short time. Additionally, there is an option to stream samples to
minimise latency (at the expense of load) by setting
``batch_polled_samples`` to ``False`` in ceilometer.conf.
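
The effect of that jitter option can be shown in a few lines. The option
name is real, but the scheduling code below is a simplified illustration,
not the agent's actual implementation::

    import random
    import time

    # mirrors shuffle_time_before_polling_task in ceilometer.conf;
    # 0 disables the jitter entirely
    shuffle_time_before_polling_task = 60


    def start_polling_task(poll):
        if shuffle_time_before_polling_task:
            # each agent sleeps a different random amount, spreading the
            # initial burst of requests across the configured window
            time.sleep(random.uniform(0, shuffle_time_before_polling_task))
        poll()


    def poll_once():
        print('polling...')

    start_polling_task(poll_once)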
Processing the data
===================

.. _multi-publisher:

Pipeline Manager
----------------

.. figure:: ./3-Pipeline.png
   :width: 100%
   :align: center
   :alt: Ceilometer pipeline

   The assembly of components making the Ceilometer pipeline.

Ceilometer offers the ability to take data gathered by the agents,
manipulate it, and publish it in various combinations via multiple
pipelines. This functionality is handled by the notification agents.

Transforming the data
---------------------

.. figure:: ./4-Transformer.png
   :width: 100%
   :align: center
   :alt: Transformer example

   Example of aggregation of multiple cpu time usage samples in a single
   cpu percentage sample.

The data gathered from the polling and notification agents contains a
wealth of information and, if combined with historical or temporal context,
can be used to derive even more data. Ceilometer offers various transformers
which can be used to manipulate data in the pipeline.

Publishing the data
-------------------

.. figure:: ./5-multi-publish.png
   :width: 100%
   :align: center
   :alt: Multi-publish

   This figure shows how a sample can be published to multiple
   destinations.

Currently, processed data can be published using three different
transports: notifier, a notification based publisher which pushes samples to
a message queue which can be consumed by the collector or an external
system; udp, which publishes samples using UDP packets; and kafka, which
publishes data to a Kafka message queue to be consumed by any system that
supports Kafka.

Storing the data
================

Collector Service
-----------------

The collector daemon gathers the processed event and metering data captured
by the notification and polling agents. It validates the incoming data and
(if the signature is valid) then writes the messages to a declared target:
database, file, or http.

.. _which-db:

Supported databases
-------------------

.. figure:: ./6-storagemodel.png
   :width: 100%
   :align: center
   :alt: Storage model

   An overview of the Ceilometer storage model.

Since the beginning of the project, a plugin model has been put in place to
allow for various types of database backends to be used. A list of supported
backends can be found in the :ref:`choosing_db_backend` section of the
documentation for more details.

In the Juno and Kilo release cycles, Ceilometer's database was divided into
three separate connections: alarm, event, and metering. This allows
deployers either to continue storing all data within a single database or to
divide the data into separate databases, each tailored to its purpose. For
example, a deployer could choose to store alarms in an SQL backend while
storing events and metering data in a NoSQL backend.

Ceilometer's storage service is designed to handle use cases where full
fidelity of the data is required (e.g. auditing). To handle responsive,
long-term data queries, solutions that strip away some of the data's
resolution, such as Gnocchi, are recommended.

.. note::

   As of Liberty, alarming support, and subsequently its database, is
   handled by Aodh_.

.. note::

   We do not guarantee that we won't change the DB schema, so it is highly
   recommended to access the database through the API and not to use direct
   queries.

.. _Aodh: http://docs.openstack.org/developer/aodh/

Accessing the data
==================

API Service
-----------

If the collected data from polling and notification agents is stored in
Ceilometer's database(s) (see the section :ref:`which-db`), it is possible
that the schema of these database(s) may evolve over time. For this reason,
we offer a REST API and recommend that you access the collected data via the
API rather than by accessing the underlying database directly. If the way in
which you wish to access your data is not yet supported by the API, please
contact us with your feedback, so that we can improve the API accordingly.

.. figure:: ./2-accessmodel.png
   :width: 100%
   :align: center
   :alt: data access model

   This is a representation of how to access data stored by Ceilometer.

The :ref:`list of currently built in meters <measurements>` is available in
the developer documentation, and it is also relatively easy to add your own
(and eventually contribute it).

Ceilometer is part of OpenStack, but is not tied to OpenStack's definition
of "users" and "tenants." The "source" field of each sample refers to the
authority defining the user and tenant associated with the sample. Deployers
can define custom sources through a configuration file, and then create
agents to collect samples for new meters using those sources. This means
that you can collect data for applications running on top of OpenStack, such
as a PaaS or SaaS layer, and use the same tools for metering your entire
cloud.

Moreover, end users can also `send their own application specific data`_
into the database through the REST API for various use cases.

.. _send their own application specific data: ./webapi/v2.html#user-defined-data
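
That user-defined-data path can be exercised with any HTTP client. The
sketch below follows the v2 API shape referenced above, but the endpoint,
meter name and token are placeholder assumptions to adapt to your
deployment::

    import json

    import requests  # assumed available; any HTTP client works

    # Placeholder endpoint and token; adjust to your deployment.
    url = 'http://localhost:8777/v2/meters/my.app.requests'
    headers = {'Content-Type': 'application/json',
               'X-Auth-Token': 'REPLACE_WITH_A_KEYSTONE_TOKEN'}
    samples = [{'counter_name': 'my.app.requests',
                'counter_type': 'delta',
                'counter_unit': 'request',
                'counter_volume': 1,
                'resource_id': 'my-app-instance-1'}]

    resp = requests.post(url, data=json.dumps(samples), headers=headers)
    print(resp.status_code, resp.text)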
ceilometer-6.1.5/doc/source/plugins.rst0000664000567000056710000001744613072744706021310 0ustar  jenkinsjenkins00000000000000..
      Copyright 2012 Nicolas Barcet for Canonical

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. _plugins-and-containers:

=======================
 Writing Agent Plugins
=======================

This documentation gives you some clues on how to write a new agent or
plugin for Ceilometer if you wish to instrument a measurement which has not
yet been covered by an existing plugin.

Agents
======

The polling agent might be run either on central cloud management nodes or
on compute nodes (where direct hypervisor polling is quite logical).

The agent running on each compute node polls for compute resources usage.
Each meter collected is tagged with the resource ID (such as an instance)
and the owner, including tenant and user IDs. The meters are then reported
to the collector via the message bus. More detailed information follows.

The agent running on the cloud central management node polls other types of
resources from a management server (usually using OpenStack services' APIs
to collect this data).

The polling agent is implemented in ``ceilometer/agent/manager.py``. As you
will see in the manager, the agent loads all plugins defined in the
namespace ``ceilometer.poll.agent``, then periodically calls their
:func:`get_samples` method.

Plugins
=======

A polling agent can support multiple plugins to retrieve different
information and send them to the collector. As stated above, an agent will
automatically activate all possible plugins if no additional information
about what to poll was passed. Previously we had separate compute and
central agents, each with its own plugin (pollster) namespace. Currently we
keep separate namespaces - ``ceilometer.poll.compute`` and
``ceilometer.poll.central`` - for quick separation of what to poll depending
on where the polling agent is running.

This will load, among others, the
:class:`ceilometer.compute.pollsters.cpu.CPUPollster`, which is defined in
the folder ``ceilometer/compute/pollsters``.

The notifications mechanism uses plugins as well, for instance the
:class:`ceilometer.telemetry.notifications.TelemetryApiPost` plugin which is
defined in the ``ceilometer/telemetry/notifications`` folder, though in most
cases this is not needed. A meter definition can be directly added to
:file:`ceilometer/meter/data/meter.yaml` to match the event type. For more
information, see the :ref:`add_new_meters` page.

We are using these two existing plugins as examples, as the first one
provides an example of how to interact when you need to retrieve information
from an external system (pollster) and the second one is an example of how
to forward an existing event notification on the standard OpenStack queue to
ceilometer.

Pollster
--------

Compute plugins are defined as subclasses of the
:class:`ceilometer.compute.BaseComputePollster` class as defined in the
``ceilometer/compute/__init__.py`` file.

Pollsters must implement one method: ``get_samples(self, manager,
context)``, which returns a sequence of ``Sample`` objects as defined in the
``ceilometer/sample.py`` file.

In the ``CPUPollster`` plugin, the ``get_samples`` method is implemented as
a loop which, for each instance running on the local host, retrieves the
cpu_time from the hypervisor and sends back two ``Sample`` objects. The
first one, named "cpu", is of type "cumulative", meaning that between two
polls its value is not reset while the instance remains active, or in other
words that the CPU value is always provided as a duration that continuously
increases since the creation of the instance. The second one, named
"cpu_util", is of type "gauge", meaning that its value is the percentage of
cpu utilization.

Note that the ``LOG`` method is only used as a debugging tool and does not
participate in the actual metering activity.

There is a way to specify either namespace(s) with pollsters, or just a list
of concrete pollsters to use, or even both of these parameters, when the
polling agent is started, via CLI parameters::

    ceilometer-polling --polling-namespaces central compute

This command will basically make the polling agent load all plugins from the
central and compute namespaces and poll everything it can. If you need to
load only some of the pollsters, you can use the ``pollster-list`` option::

    ceilometer-polling --pollster-list image image.size storage.*

If both of these options are passed, the polling agent will load only those
pollsters specified in the pollster list that can be loaded from the
selected namespaces.

.. note::

   Agent coordination cannot be used when the pollster-list option is in
   use. This restriction avoids both sample duplication and sample loss.
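
Distilled to its essentials, a pollster can be sketched as below. This
follows the ``get_samples`` contract described above but is a
self-contained illustration, not a subclass of the real base class, and the
instance listing is a stand-in for a hypervisor or service API call::

    class FakeInstanceCountPollster(object):
        """Hypothetical pollster counting running instances."""

        def _list_instances(self, manager):
            # stand-in for a hypervisor or service API call
            return ['instance-1', 'instance-2']

        def get_samples(self, manager, context):
            instances = self._list_instances(manager)
            # a gauge: the value is a point-in-time measurement
            yield {'name': 'compute.instances.running',
                   'type': 'gauge',
                   'unit': 'instance',
                   'volume': len(instances)}


    for sample in FakeInstanceCountPollster().get_samples(None, None):
        print(sample)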
We are using these two existing plugins as examples because the first one
shows how to interact with an external system when you need to retrieve
information from it (a pollster), and the second one shows how to forward
an existing event notification on the standard OpenStack queue to
ceilometer.

Pollster
--------

Compute plugins are defined as subclasses of the
:class:`ceilometer.compute.BaseComputePollster` class, as defined in the
``ceilometer/compute/__init__.py`` file. Pollsters must implement one
method: ``get_samples(self, manager, context)``, which returns a sequence
of ``Sample`` objects as defined in the ``ceilometer/sample.py`` file.

In the ``CPUPollster`` plugin, the ``get_samples`` method is implemented as
a loop which, for each instance running on the local host, retrieves the
cpu_time from the hypervisor and sends back two ``Sample`` objects. The
first one, named "cpu", is of type "cumulative", meaning that between two
polls its value is not reset while the instance remains active; in other
words, the CPU value is always provided as a duration that continuously
increases since the creation of the instance. The second one, named
"cpu_util", is of type "gauge", meaning that its value is the percentage of
cpu utilization.

Note that the ``LOG`` method is only used as a debugging tool and does not
participate in the actual metering activity.

When starting the polling agent, you can specify the namespace(s) to load
pollsters from, a list of specific pollsters to use, or both, via CLI
parameters::

    ceilometer-polling --polling-namespaces central compute

This command makes the polling agent load all plugins from the central and
compute namespaces and poll everything it can. If you need to load only
some of the pollsters, you can use the ``pollster-list`` option::

    ceilometer-polling --pollster-list image image.size storage.*

If both of these options are passed, the polling agent will load only those
pollsters specified in the pollster list that can be loaded from the
selected namespaces.

.. note::

   Agent coordination cannot be used when the pollster-list option is in
   use. This restriction avoids both duplicated and lost samples.

Notifications
-------------

.. note::

   This should only be needed for cases where complex arithmetic or
   non-primitive data types are used. In most cases, adding a meter
   definition to :file:`ceilometer/meter/data/meter.yaml` should suffice.

Notifications are defined as subclasses of the
:class:`ceilometer.agent.plugin_base.NotificationBase` meta class.
Notifications must implement ``event_types``, a sequence of strings
defining the event types to be given to the plugin, and
``process_notification(self, message)``, which receives an event message
from the list provided to event_types and returns a sequence of Sample
objects as defined in the ``ceilometer/sample.py`` file.

For example, the ``InstanceNotifications`` plugin listens to three events:

* compute.instance.create.end
* compute.instance.exists
* compute.instance.delete.start

using the ``get_event_type`` method; ``process_notification`` is then
invoked each time such an event happens, and generates the appropriate
sample objects to be sent to the collector.
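Reduced to the members just described, such a plugin looks roughly like the
skeleton below. This is a standalone sketch: a real plugin subclasses
:class:`ceilometer.agent.plugin_base.NotificationBase` and yields
``Sample`` objects rather than plain dicts, and the event type and payload
keys here are invented::

    class ExampleNotifications(object):

        # Event types this plugin wants to receive.
        event_types = ['example.create.end']

        def process_notification(self, message):
            # Called once per matching notification; derives one (or more)
            # samples from the message body.
            yield {
                'name': 'example.size',
                'type': 'gauge',
                'volume': message['payload']['size'],
                'resource_id': message['payload']['resource_id'],
            }

    msg = {'payload': {'size': 42, 'resource_id': 'ab12'}}
    print(list(ExampleNotifications().process_notification(msg)))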
Adding new plugins
------------------

Although we have described a list of the meters Ceilometer should collect,
we cannot predict all of the ways deployers will want to measure the
resources their customers use. This means that Ceilometer needs to be easy
to extend and configure so it can be tuned for each installation. A plugin
system based on `setuptools entry points`_ makes it easy to add new
monitors in the agents. In particular, Ceilometer now uses Stevedore_, and
you should put your entry point definitions in the ``entry_points.txt``
file of your Ceilometer egg.

.. _setuptools entry points: http://pythonhosted.org/setuptools/setuptools.html#dynamic-discovery-of-services-and-plugins
.. _Stevedore: http://stevedore.readthedocs.org

Installing a plugin automatically activates it the next time the ceilometer
daemon starts. Rather than running and reporting errors, or simply
consuming cycles for no-ops, plugins may disable themselves at runtime
based on configuration settings defined by other components (for example,
the plugin for polling libvirt does not run if it sees that the system is
configured using some other virtualization tool). Additionally, if no valid
resources can be discovered, the plugin will be disabled.

Tests
=====

Any new plugin or agent contribution will only be accepted into the project
if provided together with unit tests. Those are defined for the compute
agent plugins in the directory ``tests/compute`` and for the agent itself
in ``test/agent``. Unit tests are run in a continuous integration process
for each commit made to the project, thus ensuring as best as possible that
a given patch has no side effects on the rest of the project.
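For reference, the entry point stanza that makes a new pollster
discoverable (as described under `Adding new plugins`_ above) would look
something like the following in the egg's ``entry_points.txt``; the plugin
name and module path here are hypothetical::

    [ceilometer.poll.central]
    example.size = mypackage.pollsters.example:ExampleSizePollster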
ceilometer-6.1.5/doc/source/2-2-collection-poll.png0000664000567000056710000010021713072744703023174 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted: 2-2-collection-poll.png]
ceilometer-6.1.5/doc/source/api/0000775000567000056710000000000013072745164017643 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/doc/source/api/index.rst0000664000567000056710000000014713072744706021507 0ustar jenkinsjenkins00000000000000===================
 Source Code Index
===================
.. toctree::
   :maxdepth: 1

   autoindex
ceilometer-6.1.5/doc/source/2-1-collection-notification.png0000664000567000056710000010077613072744703024725 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted: 2-1-collection-notification.png]
ceilometer-6.1.5/doc/source/events.rst0000664000567000056710000002723713072744706021134 0ustar jenkinsjenkins00000000000000.. Copyright 2013 Rackspace Hosting.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

.. _events:

=============================
 Events and Event Processing
=============================

Events vs. Samples
==================

In addition to Meters, and related Sample data, Ceilometer can also process
Events. While a Sample represents a single numeric datapoint, driving a
Meter that represents the changes in that value over time, an Event
represents the state of an object in an OpenStack service (such as an
Instance in Nova, or an Image in Glance) at a point in time when something
of interest has occurred. This can include non-numeric data, such as an
instance's flavor, or a network address.

In general, Events let you know when something has changed about an object
in an OpenStack system, such as the resize of an instance, or the creation
of an image.

While Samples can be relatively cheap (small), disposable (losing an
individual sample datapoint won't matter much), and fast, Events are
larger, more informative, and should be handled more consistently (you do
not want to lose one).

Event Structure
===============

To facilitate downstream processing (billing and/or aggregation), a
:doc:`minimum required data set and format ` has been defined for services;
however, events generally contain the following information:

event_type
   A dotted string defining what event occurred, such as
   "compute.instance.resize.start"

message_id
   A UUID for this event.

generated
   A timestamp of when the event occurred on the source system.

traits
   A flat mapping of key-value pairs. The event's Traits contain most of
   the details of the event.
   Traits are typed, and can be strings, ints, floats, or datetimes.

raw
   (Optional) Mainly for auditing purposes, the full notification message
   can be stored (unindexed) for future evaluation.

Events from Notifications
=========================

Events are primarily created via the notifications system in OpenStack.
OpenStack systems, such as Nova, Glance, Neutron, etc., will emit
notifications in a JSON format to the message queue when some notable
action is taken by that system. Ceilometer will consume such notifications
from the message queue, and process them.

The general philosophy of notifications in OpenStack is to emit any and all
data someone might need, and let the consumer filter out what they are not
interested in. In order to make processing simpler and more efficient, the
notifications are stored and processed within Ceilometer as Events. The
notification payload, which can be an arbitrarily complex JSON data
structure, is converted to a flat set of key-value pairs known as Traits.
This conversion is specified by a config file, so that only the specific
fields within the notification that are actually needed for processing the
event will have to be stored as Traits.

Note that the Event format is meant for efficient processing and querying;
there are other means available for archiving notifications (e.g. for audit
purposes), possibly to different datastores.

Converting Notifications to Events
----------------------------------

In order to make it easier for users to extract what they need, the
conversion from Notifications to Events is driven by a configuration file
(specified by the flag definitions_cfg_file_ in ceilometer.conf).

This includes descriptions of how to map fields in the notification body to
Traits, and optional plugins for doing any programmatic translations
(splitting a string, forcing case, etc.)

The mapping of notifications to events is defined per event_type, which can
be wildcarded. Traits are added to events if the corresponding fields in
the notification exist and are non-null. (As a special case, an empty
string is considered null for non-text traits. This is due to some
OpenStack projects (mostly Nova) using an empty string for null dates.)

If the definitions file is not present, a warning will be logged, but an
empty set of definitions will be assumed. By default, any notifications
that do not have a corresponding event definition in the definitions file
will be converted to events with a set of minimal, default traits. This can
be changed by setting the flag drop_unmatched_notifications_ in the
ceilometer.conf file. If this is set to True, then any notifications that
don't have events defined for them in the file will be dropped. This may be
what you want: the notification system is quite chatty by design (the
notifications philosophy is "tell us everything, we'll ignore what we don't
need"), so you may want to ignore the noisier ones if you don't use them.
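To make the flattening concrete, here is a standalone sketch of the shape
of that conversion, written as plain Python rather than Ceilometer's actual
converter; the notification body and the path-to-trait mapping are invented
for the example::

    notification = {
        'event_type': 'compute.instance.resize.start',
        'publisher_id': 'compute.host-0001',
        'payload': {
            'instance_id': 'inst-42',
            'state': 'resizing',
            'image_meta': {'base_image_ref': 'img-7'},
        },
    }

    # An event definition names which fields become traits.
    wanted = {'instance_id': 'payload.instance_id',
              'state': 'payload.state'}

    def lookup(doc, path):
        # Follow a dot-separated path into the nested notification body.
        for key in path.split('.'):
            doc = doc[key]
        return doc

    traits = {name: lookup(notification, path)
              for name, path in wanted.items()}
    print(traits)  # {'instance_id': 'inst-42', 'state': 'resizing'}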
There is a set of default traits (all are TEXT type) that will be added to
all events if the notification has the relevant data:

* service: (All notifications should have this) notification's publisher
* tenant_id
* request_id
* project_id
* user_id

These do not have to be specified in the event definition; they are
automatically added, but their definitions can be overridden for a given
event_type.

Definitions file format
-----------------------

The event definitions file is in YAML format. It consists of a list of event
definitions, which are mappings. Order is significant: the list of
definitions is scanned in *reverse* order (last definition in the file to
the first), to find a definition which matches the notification's
event_type. That definition will be used to generate the Event. The reverse
ordering is done because it is common to want to have a more general
wildcarded definition (such as "compute.instance.*") with a set of traits
common to all of those events, with a few more specific event definitions
(like "compute.instance.exists") afterward that have all of the above
traits, plus a few more. This lets you put the general definition first,
followed by the specific ones, and use YAML mapping include syntax to avoid
copying all of the trait definitions.

Event Definitions
-----------------

Each event definition is a mapping with two keys (both required):

event_type
  This is a list (or a string, which will be taken as a 1 element list) of
  event_types this definition will handle. These can be wildcarded with unix
  shell glob syntax. An exclusion listing (starting with a '!') will exclude
  any types listed from matching. If ONLY exclusions are listed, the
  definition will match anything not matching the exclusions.

traits
  This is a mapping; the keys are the trait names, and the values are trait
  definitions.

Trait Definitions
-----------------

Each trait definition is a mapping with the following keys:

type
  (optional) The data type for this trait (as a string). Valid options are:
  *text*, *int*, *float*, and *datetime*. Defaults to *text* if not
  specified.

fields
  A path specification for the field(s) in the notification you wish to
  extract for this trait. Specifications can be written to match multiple
  possible fields; the value for the trait will be derived from the matching
  fields that exist and have non-null values in the notification. By default
  the value will be the first such field. (Plugins can alter that, if they
  wish.) This is normally a string, but, for convenience, it can be
  specified as a list of specifications, which will match the fields for all
  of them. (See `Field Path Specifications`_ for more info on this syntax.)

plugin
  (optional) This is a mapping (for convenience, this value can also be
  specified as a string, which is interpreted as the name of a plugin to be
  loaded with no parameters) with the following keys:

  name
    (string) name of a plugin to load

  parameters
    (optional) Mapping of keyword arguments to pass to the plugin on
    initialization. (See documentation on each plugin to see what arguments
    it accepts.)

Field Path Specifications
-------------------------

The path specifications define which fields in the JSON notification body
are extracted to provide the value for a given trait. The paths can be
specified with a dot syntax (e.g. "payload.host"). Square bracket syntax
(e.g. "payload[host]") is also supported. In either case, if the key for the
field you are looking for contains special characters, like '.', it will
need to be quoted (with double or single quotes) like so::

    payload.image_meta.'org.openstack__1__architecture'

The syntax used for the field specification is a variant of JSONPath, and is
fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for
more info)
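For a quick way to see what a simple path specification would match, you can
experiment with the library directly. The following small sketch assumes the
python-jsonpath-rw package linked above is installed, and the notification
body shown is a made-up fragment::

    from jsonpath_rw import parse

    # A made-up slice of a notification body for illustration only.
    notification = {'publisher_id': 'compute.host-0001',
                    'payload': {'host': 'host-0001',
                                'instance_id': 'placeholder-uuid'}}

    # The same dot syntax used in a trait's 'fields' specification.
    expr = parse('payload.host')
    print [match.value for match in expr.find(notification)]
    # prints: ['host-0001']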
Example Definitions file
------------------------

::

    ---
    - event_type: compute.instance.*
      traits: &instance_traits
        user_id:
          fields: payload.user_id
        instance_id:
          fields: payload.instance_id
        host:
          fields: publisher_id
          plugin:
            name: split
            parameters:
              segment: 1
              max_split: 1
        service_name:
          fields: publisher_id
          plugin: split
        instance_type_id:
          type: int
          fields: payload.instance_type_id
        os_architecture:
          fields: payload.image_meta.'org.openstack__1__architecture'
        launched_at:
          type: datetime
          fields: payload.launched_at
        deleted_at:
          type: datetime
          fields: payload.deleted_at
    - event_type:
        - compute.instance.exists
        - compute.instance.update
      traits:
        <<: *instance_traits
        audit_period_beginning:
          type: datetime
          fields: payload.audit_period_beginning
        audit_period_ending:
          type: datetime
          fields: payload.audit_period_ending

Trait plugins
-------------

Trait plugins can be used to do simple programmatic conversions on the value
in a notification field, like splitting a string, lowercasing a value,
converting a screwball date into ISO format, or the like. They are
initialized with the parameters from the trait definition, if any, which can
customize their behavior for a given trait. They are called with a list of
all matching fields from the notification, so they can derive a value from
multiple fields. The plugin will be called even if there are no fields found
matching the field path(s); this lets a plugin set a default value, if
needed. A plugin can also reject a value by returning *None*, which will
cause the trait not to be added. If the plugin returns anything other than
*None*, the trait's value will be set to whatever the plugin returned
(coerced to the appropriate type for the trait).
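As an illustration of the calling convention described above, a minimal
plugin might look like the following. This is only a sketch: the real base
class and the entry point used to register plugins live in the ceilometer
source tree, and the method name and the shape of ``match_list`` below are
assumptions stated here for illustration, not a verbatim copy of the
interface::

    class LowercasePlugin(object):
        """Hypothetical plugin: return the first matched value, lowercased."""

        def __init__(self, **parameters):
            # 'parameters' comes from the trait definition's plugin mapping.
            self.parameters = parameters

        def trait_value(self, match_list):
            # match_list holds the fields that matched the path spec(s).
            # It may be empty, which lets a plugin supply a default value.
            if not match_list:
                return None  # returning None rejects the trait entirely
            path, value = match_list[0]
            return value.lower()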
Building Notifications
======================

In general, the payload format OpenStack services emit could be described as
the Wild West. The payloads are often arbitrary data dumps at the time of
the event, which is often susceptible to change. To make consumption easier,
the Ceilometer team offers two proposals: CADF_, an open cloud standard
which helps model cloud events, and the PaaS Event Format.

.. toctree::
   :maxdepth: 1

   format

.. _CADF: http://docs.openstack.org/developer/pycadf/
ceilometer-6.1.5/doc/source/1-agents.png0000664000567000056710000014157113072744703021226 0ustar jenkinsjenkins00000000000000[binary PNG image data (1-agents.png) omitted]
N§oÓ¿òr}TôojmhPÛ©+s‚ýÇ"´64hoÞ M[¶Lc“Î}±µÕ|OB½–Û6édy¹¦Ü¿@ö³¢ëTå›o…¬­ÿyýkI­Â¹àõj×ÒGôýÇÕ¤,_‡pœßf£†#­VÝ”u$)!Õ._ðzµgyžÙÙJʘa~¾ÎTûæ·64„ —÷äLuµù™óÿ|¶66ª¶ô}U…˜‘ÝuÙ›·BÓ_f~Öý¯]ñò:Õ—Ԥ쬠÷«£u€äëJnj¬ï•îßÊ£жŒîrws›·E'j>îô˜ÕKê~×ô‰êãjkýªË¯±ò¨¯SšŽh× Á2"•““£;v¨®®NsçÎÕ† ÌîðææfhÍš5’¤™3gv¹üZvéÒ¥K¼•ýßÎ;õÞ{ïù~ÈþKƒùsXƒ”1Ò` h–ÄĈF´ïÂêµiµê‹¥×Ç<üÏ2ß<³cî"}Ô…<úò}éÍÏ0y¾ŸfÞžžû‘/Hþ§?P¼Í׉¼í—Ë6蛽ð=°<¸V//›§ª£züW…J»Ã÷KÍ=Û·èWžÓœ‡VjÜä©zã•çÔÔP/IжÆhÉ¿ä›Ïm¯|×víxm½ù|É×ýÀ/„<æŸü@M õZµñMIK¸~êíYzâå+ÿ^/žaS”e´^}ÿSí-Þª=Å[ÖõÀ/(ãÞEA×8Q}\;^ûŽ|øËÞÙ Ñœ‡žR´5fÈ~=gÞþýïßáÏžI)™²;Èt@,£»\.—Šüþú5cCÄþ8sù:ÞB@rÁëP™‘}`öÇÚ^ðz#ªáÜÿç ÍÎß GvvØçØüþTìË0]ïýé}éÍÏ€¡Á/1Ê£çߥK—¤9­ÔŒl_غ§x‹^/¡Su¹“׿sØè&®¯ùX¾XÉÎtÍyh¥’SÓÕæmÑÆŸ/ j%iÛK˵í¥å:çmÑÝ —jÎC+u÷Â¥jj¨×ÆŸ/67î3´y[ÌpxÜä©A×÷_“”OIM×¶—–ëõ‚Õ²;¦¬Ë?7”ïÚ®uÏבv+9Õ÷:æ<´Rñ6»öoÑÆẎÀUA°Œžp»Ý*,,TJ»¿ä•|¡ò† ÌÍû#Æb€ãÜ©SJp:5*Á¦úòò °|¤Õªï/{L’otEcÃeHŒ@öDõqݽpiÀÌâ´;²´ñ狵·x«f/|ÄÜ8ϤÛo¦W_ã;בvk­û!ï+O»tôÃRíxí7È%ÛÖ«|×v³sÚÿ|i·gkÝãóôÆ+Ï)íö,³SØXs¼ÍÐ=l\ßt…±Ö*Ï!Å'$é_ßúï€k<—{§êk>VSC½ySC½¶½´\’´ä™‚€®æÙ —ê9×]ª/äsÛÏBö¿~Àl壡»©ãÃmØ'Ž÷0fBG[cÌæöÚ¼_ñá醿ÖzÂ3`0~Ãw”¶àI]?â;K° \A¸ œÖ†}òæ[æ˜ *Bu ·w¾Õ¤†šc8~âr7qZè@ĄĕÙɾcâmv]ºúúñ6»âmv¥Ý‘%É6±ÿµ:‰o³Ãð_SÈìQ£Ã®)Ê2úr·õT>Dˆ§¾ýúo:w¦A1¶‰G° "\€"Ôx M õA¡­ȆÛLÏèJn϶s{Ooz'â5³ Û_¿³næÀsw`£.2î]r¼"ãt:õé§Ÿª©©‰b`P:yò¤ÎŸ?ß­c –`„Ë0@áªÿf}þöo‘ä „ 9ÔfzþÝÄm­-!Ïup·o¾r¨ñ•G*Bv<ïÙ¾EGËK5gñJ¿yËÁ!²ÿø‹Îº™ý] ¤¯<>ÖfWU‡5;®7^yNÉÎtÂç.ˆŽŽ–Ë墴֯_¯êêêˆ#XB»ŽÀÀ`lfw:ÄÆtM õ:¸»X’”óЕ5ôfzÇÍÛG>( :מí[ÔÔP¯x›= \6º˜C…ÛFˆ[y¤"`¤E¨pÛ¸¯+ÝÌí¯Ñþµá´†ûkó¶hÛKy#> RË@x„Ë0øofwÎÛ¢?_lvÿV©Ðó‹ïR›·Ew/\Ð *°õŸm|p÷ví-Þj>¶·x«Þxå9IÒÃϬaöÂ¥’¤¯­7ió¶hÇkëµîñù’¤%Ï„Ëæf‚ÊÁ÷…›Íl7Þ#íö,ÅÛìjj¨¨É‘Kµîñù:Q}\vÇTsí ‚e cŒÅ€À3»Œ{iÛKËuäƒÝϹ{áR=¸üEóëpã'ŒÎáœ%+Uy¤B¯¬Öë«ÍÇ£,£õð3A!oÚÙšóÐJíxm}Ð1’ôÀ/t:75Ô«ÍÛb®»ýõïг™Û?Þ~dF´5FO¼\¨——ÍÓ‘vÕ$95]Oüª0 €® X:G¸ €ÿfv÷.ÒX›]å»¶«©±^c/¯h·µ~¥9­T´etÀýþ³‹ãÊwm7Ï?{áÒ°alÎ’•J»#Kå»¶¯)1xsÀ9­ º?íö,MIMom‰ ù\ÃØD»æ<´2äfã&ߢõoý·Êwmב}c>ÂÕº‚`èša—.]ºDú¿;wê½÷Þ“$%ü¥A‰ ƒžçûiæíé¹/ éZ<ïºK'ªkÕÆwz˜65ÔëŸü@’Tx°‘Ùs¨è9óöïÿ{ ‚!ÇC¿ÿãžÅбM zÁ2ÐuÌ\€ ÜXˆH#)’S J =‚e 2„ËÐÏ…ÛÌ®;z+¤€Á†`ˆá2ôsW6Àëy lÌIößL†:‚e {ØÐú¹Œ{iJjzØÍî"‘óÐJIt.€`è>ÂeèçÆ&Ú{%X–Ô£Í`°!Xz†pCNÝíVÛÙFók‚e rÌ\ÀC° ôá2†,‚e û—0$,=C¸ €!!::Ú¼M° ôú0´4~nÞž3oÛív è1Âe†€³'+ÍÛßûÞ÷( Ç—äÎiÔ…ÖfIRTT”’““)  Ç—äþ¿ ó¶Óé¤ €^A¸ À vîL£þZë1¿ž>}:Eô Âe±ºÿÚeÞNIIa$ ×.0HÕ”¿£¯NÕ™_/Z´ˆ¢z á2ƒÐ—5GÆa,\¸PñññÐk®§ .'•©ÞóŸæ×Ó§O×wÞIa½Šp€A⛋SÝí èXNJJ’Ëå¢8€^G¸ À ð×Z>ÿwéÛ¯ÿfÞ—’’B° ¸j— ÎiÔÙúJ}YsDZ›ûáÈ~€«ŠpÀ€pòXEÀ÷Í…ó:w¦A’/XöïR6ÄÅÅiÑ¢Er: pU.ü7',**JwÞy§î»ï>Џ&—ô[£››õUl,…ˆ‹‹Srr²œN§¾÷½ï)::š¢®ÂeýÖøº/ÔÛ¢ #FPŒ>ÔjµèœÕ*Iš`_ 3. If you are relying on this backend to bill customers, you will note that your capacity to generate revenue is very much linked to its reliability, which seems to be a factor dear to many managers. The following is a table indicating the status of each database drivers: ================== ============================= =================== ====== Driver API querying API statistics Alarms ================== ============================= =================== ====== MongoDB Yes Yes Yes MySQL Yes Yes Yes PostgreSQL Yes Yes Yes HBase Yes Yes, except groupby Yes ================== ============================= =================== ====== .. _Gnocchi: http://gnocchi.xyz ceilometer-6.1.5/doc/source/install/upgrade.rst0000664000567000056710000001000413072744706022715 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
      See the License for the specific language governing permissions and
      limitations under the License.

.. _upgrade:

==========
 Upgrading
==========

Ceilometer's services support both full upgrades and partial (rolling)
upgrades. The required steps for each process are described below.

Full upgrades
=============

The following describes how to upgrade your entire Ceilometer environment in
one pass.

.. _full upgrade path:

1. Upgrade the database (if applicable)

   Run ceilometer-dbsync to upgrade the database if using one of
   Ceilometer's databases (see :ref:`choosing_db_backend`). The database
   does not need to be taken offline as no data is modified or deleted.
   Ideally this should be done during a period of low activity. Best
   practices should still be followed (i.e. back up your data). If not using
   a Ceilometer database, you should consult the documentation of that
   storage beforehand.

2. Upgrade the collector service(s)

   Shut down all collector services. The new collector, which knows how to
   interpret the new payload, can then be started. It will disregard any
   historical attributes and can continue to process older data from the
   agents. You may restart as many new collectors as required.

3. Upgrade the notification agent(s)

   The notification agent can then be taken offline and upgraded under the
   same conditions as the collector service.

4. Upgrade the polling agent(s)

   In this path, you'll want to take down agents on all hosts before
   starting. After starting the first agent, you should verify that data is
   again being polled. Additional agents can be added to support
   coordination if enabled.

.. note::

   The API service can be taken offline and upgraded at any point in the
   process (if applicable).

Partial upgrades
================

The following describes how to upgrade parts of your Ceilometer environment
gradually. The ultimate goal is to have all services upgraded to the new
version in time.

1. Upgrade the database (if applicable)

   Upgrading the database here is the same as the `full upgrade path`_.

2. Upgrade the collector service(s)

   The new collector services can be started alongside the old collectors.
   Collectors old and new will disregard any new or historical attributes.

3. Upgrade the notification agent(s)

   The new notification agent can be started alongside the old agent if no
   workload_partitioning is enabled OR if it has the same pipeline
   configuration. If the pipeline configuration is changed, the old agents
   must be loaded with the same pipeline configuration first, to ensure the
   notification agents all work against the same pipeline sets.

4. Upgrade the polling agent(s)

   The new polling agent can be started alongside the old agent only if no
   new pollsters were added. If new pollsters were added, the new polling
   agents must start only in their own partitioning group and poll only the
   new pollsters. After all old agents are upgraded, the polling agents can
   be changed to poll both the new pollsters AND the old ones.

5. Upgrade the API service(s)

   API management is handled by WSGI so there is only ever one version of
   the API service running.

.. note::

   Upgrade ordering does not matter in the partial upgrade path. The only
   requirement is that the database be upgraded first. It is advisable to
   upgrade following the same ordering as currently described: database,
   collector, notification agent, polling agent, api.

Developer notes
===============

When updating data models in the database or IPC, we need to adhere to a
single mantra: 'always add, never delete or modify.'
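For example, under the rolling-upgrade constraints above, a database change
should be purely additive. The following hypothetical Alembic migration
illustrates the idea; the table and column names are made up for this
sketch::

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Safe: old services simply ignore a column they do not know about.
        op.add_column('resource',
                      sa.Column('new_attribute', sa.String(255),
                                nullable=True))
        # Unsafe during a rolling upgrade: dropping or renaming a column
        # would break services still running the previous release.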
ceilometer-6.1.5/doc/source/install/manual.rst0000664000567000056710000005416013072744706022556 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 Nicolas Barcet for Canonical
                2013 New Dream Network, LLC (DreamHost)

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. _installing_manually:

=====================
 Installing Manually
=====================

Storage Backend Installation
============================

This step is a prerequisite for the collector, notification agent and API
services. You may use one of the database backends listed below to store
Ceilometer data.

.. note::

   Please note that MongoDB requires pymongo_ to be installed on the system.
   The required minimum version of pymongo is 2.4.

..

MongoDB
-------

The recommended Ceilometer storage backend is `MongoDB`. Follow the
instructions to install the MongoDB_ package for your operating system, then
start the service. The required minimum version of MongoDB is 2.4.

To use MongoDB as the storage backend, change the 'database' section in
ceilometer.conf as follows::

    [database]
    connection = mongodb://username:password@host:27017/ceilometer
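Analogous to the happybase connectivity test shown in the HBase section
below, you can verify the MongoDB connection with a quick client test. This
is a sketch assuming the pymongo_ package is installed; the host name and
credentials are placeholders::

    import pymongo

    client = pymongo.MongoClient(
        'mongodb://username:password@host:27017/ceilometer')
    print client.server_info()['version']  # expect 2.4 or later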
SQLalchemy-supported DBs
------------------------

You may alternatively use `MySQL` (or any other SQLAlchemy-supported DB like
`PostgreSQL`).

In case of SQL-based database backends, you need to create a `ceilometer`
database first and then initialise it by running::

    ceilometer-dbsync

To use MySQL as the storage backend, change the 'database' section in
ceilometer.conf as follows::

    [database]
    connection = mysql+pymysql://username:password@host/ceilometer?charset=utf8

HBase
-----

The HBase backend is implemented to use the HBase Thrift interface,
therefore it is mandatory to have the HBase Thrift server installed and
running. To start the Thrift server, please run the following command::

    ${HBASE_HOME}/bin/hbase thrift start

The implementation uses `HappyBase`_, which is a wrapper library used to
interact with HBase via the Thrift protocol. You can verify the thrift
connection by running a quick test from a client::

    import happybase

    conn = happybase.Connection(host=$hbase-thrift-server, port=9090,
                                table_prefix=None,
                                table_prefix_separator='_')
    print conn.tables()  # this returns a list of HBase tables in your HBase server

.. note::

   HappyBase version 0.5 or greater is required. Additionally, version 0.7
   is not currently supported.

..

In case of HBase, the needed database tables (`project`, `user`, `resource`,
`meter`) should be created manually with an `f` column family for each one.

To use HBase as the storage backend, change the 'database' section in
ceilometer.conf as follows::

    [database]
    connection = hbase://hbase-thrift-host:9090

It is possible to customize happybase's `table_prefix` and
`table_prefix_separator` via the query string. By default `table_prefix` is
not set and `table_prefix_separator` is '_'. When `table_prefix` is not
specified, `table_prefix_separator` is not taken into account. E.g. the
resource table in the default case will be 'resource', while with
`table_prefix` set to 'ceilo' and `table_prefix_separator` to '.' the
resulting table will be 'ceilo.resource'. For this second case this is the
database connection configuration::

    [database]
    connection = hbase://hbase-thrift-host:9090?table_prefix=ceilo&table_prefix_separator=.

.. _HappyBase: http://happybase.readthedocs.org/en/latest/index.html#
.. _MongoDB: http://www.mongodb.org/
.. _pymongo: https://pypi.python.org/pypi/pymongo/

Installing the notification agent
=================================

.. index::
   double: installing; agent-notification

1. If you want to be able to retrieve image samples, you need to instruct
   Glance to send notifications to the bus by changing ``notifier_strategy``
   to ``rabbit`` in ``glance-api.conf`` and restarting the service.

2. If you want to be able to retrieve volume samples, you need to instruct
   Cinder to send notifications to the bus by changing
   ``notification_driver`` to ``messagingv2`` and ``control_exchange`` to
   ``cinder``, before restarting the service.

3. If you want to be able to retrieve instance samples, you need to instruct
   Nova to send notifications to the bus by setting these values::

       # nova-compute configuration for ceilometer
       instance_usage_audit=True
       instance_usage_audit_period=hour
       notify_on_state_change=vm_and_task_state
       notification_driver=messagingv2

4. In order to retrieve object store statistics, ceilometer needs access to
   swift with the ``ResellerAdmin`` role. You should give this role to your
   ``os_username`` user for tenant ``os_tenant_name``::

       $ keystone role-create --name=ResellerAdmin
       +----------+----------------------------------+
       | Property |              Value               |
       +----------+----------------------------------+
       |    id    | 462fa46c13fd4798a95a3bfbe27b5e54 |
       |   name   |          ResellerAdmin           |
       +----------+----------------------------------+

       $ keystone user-role-add --tenant_id $SERVICE_TENANT \
                                --user_id $CEILOMETER_USER \
                                --role_id 462fa46c13fd4798a95a3bfbe27b5e54

   You'll also need to add the Ceilometer middleware to Swift to account for
   incoming and outgoing traffic, by adding these lines to
   ``/etc/swift/proxy-server.conf``::

       [filter:ceilometer]
       use = egg:ceilometer#swift

   And adding ``ceilometer`` in the ``pipeline`` of that same file, right
   before ``proxy-server``.

   Additionally, if you want to store extra metadata from headers, you need
   to set ``metadata_headers`` so it would look like::

       [filter:ceilometer]
       use = egg:ceilometer#swift
       metadata_headers = X-FOO, X-BAR

   .. note::

      Please make sure that ceilometer's logging directory (if it's
      configured) is readable and writable by the user swift is started by.

5. Clone the ceilometer git repository to the management server::

       $ cd /opt/stack
       $ git clone https://git.openstack.org/openstack/ceilometer.git

6. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

       $ cd ceilometer
       $ sudo python setup.py install

7. Copy the sample configuration files from the source tree to their final
   location::

       $ mkdir -p /etc/ceilometer
       $ cp etc/ceilometer/*.json /etc/ceilometer
       $ cp etc/ceilometer/*.yaml /etc/ceilometer
       $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

8. Edit ``/etc/ceilometer/ceilometer.conf``

   1. Configure messaging

      Set the messaging related options correctly so ceilometer's daemons
      can communicate with each other and receive notifications from the
      other projects. In particular, look for the ``*_control_exchange``
      options and make sure the names are correct. If you did not change the
      ``control_exchange`` settings for the other components, the defaults
      should be correct.

      .. note::

         Ceilometer makes extensive use of the messaging bus, but has not
         yet been tested with ZeroMQ. We recommend using Rabbit for now.

   2. Set the ``telemetry_secret`` value.

      Set the ``telemetry_secret`` value to a large, random value. Use the
      same value in all ceilometer configuration files, on all nodes, so
      that messages passing between the nodes can be validated.
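      For example, the resulting entry could look like the following. This
      is a sketch: the section name follows the sample configuration shipped
      with this release, and the value shown is a placeholder you should
      generate yourself (e.g. from /dev/urandom)::

          [publisher]
          telemetry_secret = 0d23fd61d7d1a27caa51c2bc78a5d4ba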
   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

9. Start the notification daemon::

       $ ceilometer-agent-notification

   .. note::

      The default development configuration of the notification agent logs
      to stderr, so you may want to run this step using a screen session or
      other tool for maintaining a long-running program in the background.

Installing the collector
========================

.. index::
   double: installing; collector

.. _storage_backends:

1. Clone the ceilometer git repository to the management server::

       $ cd /opt/stack
       $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

       $ cd ceilometer
       $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

       $ mkdir -p /etc/ceilometer
       $ cp etc/ceilometer/*.json /etc/ceilometer
       $ cp etc/ceilometer/*.yaml /etc/ceilometer
       $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

4. Edit ``/etc/ceilometer/ceilometer.conf``

   1. Configure messaging

      Set the messaging related options correctly so ceilometer's daemons
      can communicate with each other and receive notifications from the
      other projects. In particular, look for the ``*_control_exchange``
      options and make sure the names are correct. If you did not change the
      ``control_exchange`` settings for the other components, the defaults
      should be correct.

      .. note::

         Ceilometer makes extensive use of the messaging bus, but has not
         yet been tested with ZeroMQ. We recommend using Rabbit for now.

   2. Set the ``telemetry_secret`` value.

      Set the ``telemetry_secret`` value to a large, random value. Use the
      same value in all ceilometer configuration files, on all nodes, so
      that messages passing between the nodes can be validated.

   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

5. Start the collector::

       $ ceilometer-collector

   .. note::

      The default development configuration of the collector logs to
      stderr, so you may want to run this step using a screen session or
      other tool for maintaining a long-running program in the background.

Installing the Polling Agent
============================

.. index::
   double: installing; agent

.. note::

   The polling agent needs to be able to talk to Keystone and any of the
   services being polled for updates. It also needs to run on your compute
   nodes to poll instances.

1. Clone the ceilometer git repository to the server::

       $ cd /opt/stack
       $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

       $ cd ceilometer
       $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

       $ mkdir -p /etc/ceilometer
       $ cp etc/ceilometer/*.json /etc/ceilometer
       $ cp etc/ceilometer/*.yaml /etc/ceilometer
       $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf
4. Edit ``/etc/ceilometer/ceilometer.conf``

   Set the messaging related options correctly so ceilometer's daemons can
   communicate with each other and receive notifications from the other
   projects. In particular, look for the ``*_control_exchange`` options and
   make sure the names are correct. If you did not change the
   ``control_exchange`` settings for the other components, the defaults
   should be correct.

   .. note::

      Ceilometer makes extensive use of the messaging bus, but has not yet
      been tested with ZeroMQ. We recommend using Rabbit for now.

   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

5. Start the agent::

       $ ceilometer-polling

6. By default, the polling agent polls the `compute` and `central`
   namespaces. You can specify which namespace to poll in the
   `ceilometer.conf` configuration file or on the command line::

       $ ceilometer-polling --polling-namespaces central,ipmi

Installing the API Server
=========================

.. index::
   double: installing; API

.. note::

   The API server needs to be able to talk to keystone and ceilometer's
   database.

1. Clone the ceilometer git repository to the server::

       $ cd /opt/stack
       $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

       $ cd ceilometer
       $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

       $ mkdir -p /etc/ceilometer
       $ cp etc/ceilometer/api_paste.ini /etc/ceilometer
       $ cp etc/ceilometer/*.json /etc/ceilometer
       $ cp etc/ceilometer/*.yaml /etc/ceilometer
       $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

4. Edit ``/etc/ceilometer/ceilometer.conf``

   1. Configure messaging

      Set the messaging related options correctly so ceilometer's daemons
      can communicate with each other and receive notifications from the
      other projects. In particular, look for the ``*_control_exchange``
      options and make sure the names are correct. If you did not change the
      ``control_exchange`` settings for the other components, the defaults
      should be correct.

      .. note::

         Ceilometer makes extensive use of the messaging bus, but has not
         yet been tested with ZeroMQ. We recommend using Rabbit for now.

   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

5. (Optional) As of the Juno release, Ceilometer utilises Paste Deploy to
   manage WSGI applications. Ceilometer uses keystonemiddleware by default,
   but additional middleware and applications can be configured in
   api_paste.ini. For examples on how to use Paste Deploy, refer to this
   documentation_.

   .. _documentation: http://pythonpaste.org/deploy/

6. Choose and start the API server.

   Ceilometer includes the ``ceilometer-api`` command. This can be used to
   run the API server. For smaller or proof-of-concept installations this is
   a reasonable choice. For larger installations it is strongly recommended
   to install the API server in a WSGI host such as mod_wsgi (see
   :doc:`mod_wsgi`). Doing so will provide better performance and more
   options for making adjustments specific to the installation environment.

   If you are using the ``ceilometer-api`` command it can be started as::

       $ ceilometer-api

   .. note::

      The development version of the API server logs to stderr, so you may
      want to run this step using a screen session or other tool for
      maintaining a long-running program in the background.
Configuring keystone to work with API
=====================================

.. index::
   double: installing; configure keystone

.. note::

   The API server needs to be able to talk to keystone to
   authenticate.

1. Create a service for ceilometer in keystone::

      $ keystone service-create --name=ceilometer \
                                --type=metering \
                                --description="Ceilometer Service"

2. Create an endpoint in keystone for ceilometer::

      $ keystone endpoint-create --region RegionOne \
            --service_id $CEILOMETER_SERVICE \
            --publicurl "http://$SERVICE_HOST:8777/" \
            --adminurl "http://$SERVICE_HOST:8777/" \
            --internalurl "http://$SERVICE_HOST:8777/"

.. note::

   CEILOMETER_SERVICE is the id of the service created by the first
   command and SERVICE_HOST is the host where the Ceilometer API is
   running. The default port value for ceilometer API is 8777. If the
   port value has been customized, adjust accordingly.
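The legacy ``keystone`` CLI used above is deprecated; on deployments
that ship the unified ``openstack`` client, the same setup can be done
roughly as follows. This is a sketch only: the region, host, and port
mirror the note above, and the exact syntax depends on the client and
Identity API version in use::

   $ openstack service create --name ceilometer \
         --description "Ceilometer Service" metering
   $ openstack endpoint create --region RegionOne \
         metering public "http://$SERVICE_HOST:8777"
   $ openstack endpoint create --region RegionOne \
         metering internal "http://$SERVICE_HOST:8777"
   $ openstack endpoint create --region RegionOne \
         metering admin "http://$SERVICE_HOST:8777"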
Configuring Heat to send notifications
======================================

Configure the driver in ``heat.conf``::

   notification_driver=messagingv2

Configuring Sahara to send notifications
========================================

Configure the driver in ``sahara.conf``::

   enable_notifications=true
   notification_driver=messagingv2

You also need to configure the messaging-related options correctly,
as described above for the other parts of the installation. Refer to
:doc:`/configuration` for details about any other options you might
want to modify before starting the service.

Configuring MagnetoDB to send notifications
===========================================

Configure the driver in ``magnetodb-async-task-executor.conf``::

   notification_driver=messagingv2

You also need to restart the magnetodb-async-task-executor service
(if it is already running) after changing the above configuration
file.

Notifications queues
====================

.. index::
   double: installing; notifications queues; multiple topics

By default, Ceilometer consumes notifications on the messaging bus
sent to **notification_topics** by using a queue/pool name that is
identical to the topic name. You shouldn't have different applications
consuming messages from this queue. If you want to also consume the
topic notifications with a system other than Ceilometer, you should
configure a separate queue that listens for the same messages.

Ceilometer allows multiple topics to be configured, so that the
polling agent can send the same notification messages to several
queues. Notification agents also use **notification_topics** to
configure which queues to listen on. If you use multiple topics, you
should configure the notification agent and the polling agent
separately; otherwise Ceilometer collects duplicate samples.

By default, the ceilometer.conf file is as follows::

   [DEFAULT]
   notification_topics = notifications

To use multiple topics, you should give the
ceilometer-agent-notification and ceilometer-polling services
different ceilometer.conf files. The Ceilometer configuration file
ceilometer.conf is normally located in the /etc/ceilometer directory.
Make changes according to your requirements; they may look like the
following.

For the notification agent, using ceilometer-notification.conf::

   [DEFAULT]
   notification_topics = notifications,xxx

For the polling agent, using ceilometer-polling.conf::

   [DEFAULT]
   notification_topics = notifications,foo

.. note::

   The ``notification_topics`` lists in ceilometer-notification.conf
   and ceilometer-polling.conf should have exactly one topic in
   common.

Doing this makes it easy to listen to and receive data from multiple
internal and external services.

Using multiple dispatchers
==========================

.. index::
   double: installing; multiple dispatchers

The Ceilometer collector allows multiple dispatchers to be configured
so that data can be easily sent to multiple internal and external
systems. Dispatchers are divided between ``event_dispatchers`` and
``meter_dispatchers``, which can each be provided with their own set
of receiving systems.

.. note::

   In Liberty and prior releases the configuration option for all
   data was ``dispatcher``, but this was changed for the Mitaka
   release to break out separate destination systems by type of data.

By default, Ceilometer only saves event and meter data in a database.
If you want Ceilometer to send data to other systems, instead of or
in addition to the Ceilometer database, multiple dispatchers can be
enabled by modifying the Ceilometer configuration file.

Ceilometer currently ships four dispatchers: ``database``, ``file``,
``http``, and ``gnocchi``. As the names imply, the database
dispatcher sends metering data to a database, the file dispatcher
logs meters to a file, the http dispatcher posts the meters to an
HTTP target, and the gnocchi dispatcher posts the meters to the
Gnocchi_ backend. Each dispatcher can have its own configuration
parameters; the available parameters are listed at the beginning of
each dispatcher file.

.. _Gnocchi: http://gnocchi.readthedocs.org/en/latest/basic.html

To check which dispatchers are available on your system, you can
inspect the Ceilometer egg entry_points.txt file, where you should
normally see text like the following::

   [ceilometer.dispatcher]
   database = ceilometer.dispatcher.database:DatabaseDispatcher
   file = ceilometer.dispatcher.file:FileDispatcher
   http = ceilometer.dispatcher.http:HttpDispatcher
   gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher

To configure one or multiple dispatchers for Ceilometer, find the
Ceilometer configuration file ceilometer.conf, which is normally
located in the /etc/ceilometer directory (your configuration file may
be in a different directory), and make changes accordingly.

To use multiple dispatchers on a Ceilometer collector service, add
multiple dispatcher lines to the ceilometer.conf file like the
following (a fuller sketch of this setup is shown at the end of this
section)::

   [DEFAULT]
   meter_dispatchers=database
   meter_dispatchers=file

If no dispatcher is configured, the database dispatcher is used as
the default. If in some cases, such as traffic tests, no dispatcher
is needed, one can configure the line without a dispatcher, like the
following::

   event_dispatchers=

With the above configuration, no event dispatcher is used by the
Ceilometer collector service; all event data received by the
Ceilometer collector will be dropped.

For the gnocchi dispatcher, the following configuration settings
should be added::

   [DEFAULT]
   meter_dispatchers = gnocchi

   [dispatcher_gnocchi]
   archive_policy = low

The value specified for ``archive_policy`` should correspond to the
name of an ``archive_policy`` configured within Gnocchi.

For a gnocchi dispatcher backed by Swift storage, the following
additional configuration settings should be added::

   [dispatcher_gnocchi]
   filter_project = gnocchi_swift
   filter_service_activity = True

.. note::

   If the gnocchi dispatcher is enabled, Ceilometer API calls will
   return a 410 with an empty result. The Gnocchi API should be used
   instead to access the data.
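As a worked example of the database-plus-file setup above, the
fragment below also sets the file dispatcher's own options; the path
and rotation values are illustrative assumptions, not defaults::

   [DEFAULT]
   meter_dispatchers = database
   meter_dispatchers = file

   [dispatcher_file]
   # Where the file dispatcher writes meters, rotated like a log file.
   file_path = /var/log/ceilometer/meters.log
   max_bytes = 10000000
   backup_count = 5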
ceilometer-6.1.5/doc/source/install/development.rst

..
   Copyright 2012 Nicolas Barcet for Canonical
             2013 New Dream Network, LLC (DreamHost)

   Licensed under the Apache License, Version 2.0 (the "License"); you
   may not use this file except in compliance with the License. You
   may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

===============================
Installing development sandbox
===============================

Ceilometer has several daemons. The basic ones are: the
:term:`polling agent` running either on the Nova compute node(s) or
on the central management node(s), plus the :term:`collector` and the
:term:`notification agent` running on the cloud's management node(s).

In a development environment created by devstack_, these services are
typically running on the same server. They do not have to be, though,
so some of the instructions below are duplicated. Skip the steps you
have already done.

.. note::

   Previously, ceilometer had separate compute and central agents,
   and it is that pair, rather than the single polling agent, that
   devstack_ currently supports. Deprecated command scripts that
   emulate the old compute/central behaviour by passing the
   namespaces option to the polling agent are provided and will be
   maintained for a transitional period.

Configuring devstack
====================

.. index::
   double: installing; devstack

1. Download devstack_.

2. Create a ``local.conf`` file as input to devstack.

3. Ceilometer makes extensive use of the messaging bus, but has not
   yet been tested with ZeroMQ. We recommend using Rabbit for now. By
   default, RabbitMQ will be used by devstack.

4. The ceilometer services are not enabled by default, so they must
   be enabled in ``local.conf`` before running ``stack.sh``.

   This example ``local.conf`` file shows all of the settings required
   for ceilometer::

      [[local|localrc]]
      # Enable the Ceilometer devstack plugin
      enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git

5. Nova does not generate the periodic notifications for all known
   instances by default. To enable these auditing events, set
   ``instance_usage_audit`` to true in the nova configuration file
   and restart the service (a sample fragment is shown after this
   list).

6. Cinder does not generate notifications by default. To enable these
   auditing events, set the following in the cinder configuration
   file and restart the service::

      notification_driver=messagingv2

.. _devstack: http://www.devstack.org/
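For step 5 above, a minimal sketch of the corresponding ``nova.conf``
fragment; ``instance_usage_audit_period`` is an optional extra knob
(an assumption here, not mentioned above) that controls how often the
audit notifications are emitted::

   [DEFAULT]
   instance_usage_audit = True
   # Optional: emit the audit/exists notifications every hour.
   instance_usage_audit_period = hour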
ceilometer-6.1.5/doc/source/contributing.rst

..
   Licensed under the Apache License, Version 2.0 (the "License"); you
   may not use this file except in compliance with the License. You
   may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

.. _contributing:

==========================
Contributing to Ceilometer
==========================

Ceilometer follows the same workflow as other OpenStack projects. To
start contributing to Ceilometer, please follow the workflow found
here_.

.. _here: https://wiki.openstack.org/wiki/Gerrit_Workflow

Project Hosting Details
=======================

:Bug tracker: http://launchpad.net/ceilometer
:Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
   (prefix subjects with ``[Ceilometer]`` for faster responses)
:Wiki: http://wiki.openstack.org/wiki/Ceilometer
:Code Hosting: https://git.openstack.org/cgit/openstack/ceilometer/
:Code Review: https://review.openstack.org/#/q/status:open+project:openstack/ceilometer,n,z

ceilometer-6.1.5/doc/source/ceilo-gnocchi-arch.png (binary PNG image:
Ceilometer/Gnocchi architecture diagram)
Š2€½°G»¯WTTàСCèíí•eŠrÅrþã3M¥;wî ¯¯_~ù%¦ô½ÈÑŒ¤ D[[[nI¸=5ÇB£Ñ„=öåDö´Z­p»Ý#~ž¸¸8lÛ¶ ÙÙÙ286›Ía׃ËåÂþýûeïJÿÏ-z;îÞ½[uÙ‰ÚþÅÅÅ8|ø°œìLm‚´p´¶¶B£ÑŒ¸-’““QVV†äädTWWÃår©î{:nÄ@_Yg£UXX(ÇpUNz&Úp{ÇNĶHJJ ;4U†Öz½§OŸ†ÉdBkk+Ìf³|^ ©Q[[«:éQ¤c KDDD3R^^žÏŒó£ dE/Ǥ¤¤1…¢Ê@×?|Õét8räÈ”ÖÇÅ‹e}TTT„®*o÷WC9|ø°|}AArss‘VÕ‰èñ¨3¶  õõõ0›Íhjjõ~PYY)÷…ººº°–ÉÍÍ•½‹;;;Þ³°°pRCÄäädèt:tvvÂl6Ãh4¢©©I¬á¾÷X{+·¡ÑhĶmÛÆüYü‡Õ=nEûªªª 9&/Q$šçÿ@ww7*++QWW‡?þñ¬!"""šÉÉÉÈÊÊ9qU¸Îž=+ë`Þ§Ÿ~² q‹¾F£‘Á–ÚÚÚFì}ØÚÚ:¡Ã(CåÑÔ…è•nà%&“¼ÁdYYòòò½`C({„† M&ÊËËQ^^POÊá vîÜ)Ë|ýõ×G]§ÊÏ=šzóÿ<þCXŒT‡£ ÀÕˆIãÌf3\.— •CMÎ¥&)))¬uÞ½{7ÊËËqñâE$''ËÏ=Òr.—Kõ³ªÕ^¯Ç¶mÛðË_þÏ?ÿü¸¶ ÑlÈ  ££_~ù%îܹÃ"""¢iSVV&ƒ¡ªªª°B®¦¦&Ù»Q£Ñ ¸¸Xõu.— MMMAŸ·‡çææÊÇ•½RC!ÚÚÚŠòòrüøÇ?ÆÙ³g'¤.²³³e¸öÎ;ï„UÊu w¸eÐjX†`Ÿ_€‡{TŒ)jµZCö䌋‹“’‰¡ FCÙ›ôäÉ“aº"üTÖ[\\œü ¾¾>d9UUUؽ{7ŠŠŠÆÊçææÊ}ßd2ɺmÏ\±þ"Ø ¶ÍŤpjË…?øÄ‰ؽ{7Ö­['_·oß>”””àðáÃA—ãÄ^DDD4—ÍcÑL•œœ,U»ÝŽýû÷û„eJ.— ÕÕÕ>!PiiiȰïäÉ“ªaSUU• ¯DOEÀ†‰Pô­·ÞRíÙçr¹äÐþË—(KŒñ*&yRS[[+ÃKFã,+ùO` ì},ˆ³Z­xçw‚n3\ Ž[[[åv §~òòòäú‹¡ F³ÈõÞ½{wЙ.— 'Nœõš••å*ë_ŒmëOŒ™ `\‘ÅÅÅÉÏ,ê:Ôv Fà ³•‹@]¹]B}V±ssså¾#ÞÓn·ãâÅ‹ªï©ÜáŽÇLDDD)8†,Íh7nD\\œ I+++qöìYŸq'­VkÀ0¥¥¥#Ž7j·Û±{÷n"++ ½½½¨­­•!bqqq@O¾ƒb÷îÝp¹\(//‡Ñh”½W;;;Q[[+ƒÌ‘á±ÔEgg§ì¡yâÄ œ8qBÖ…F£Agg':;;e]h49«Rrr2ÚÚÚdH©ÑhðüóÏC¯×ËñKëëë%MÀÛ;TôŽãÓú·eeeøéO*ƒc1­ÛíFkk« <“’’¬KKKQRR·Û×_=ì1mï°¢^ìv;ÊËËNN'?ƒ2$LJJ ˜D,//OŽiÛÚÚŠÝ»wÃh4"++ n·f³YÖN§ÃÎ;ǵ½F#êëëeýŽ%àÍÎΖ“„™Íf¹¿‹ýµ¾¾ÞgmJ¹œÕjÅŽ;PXX(ÃSågõ››‹¤¤$ôööâäÉ“øôÓOeéííEkk«\Ö?ô&"""š ÈÑŒg4‘œœŒÊÊJôööÂn·½e>77W5HU#'µaŠ‹‹U'3ÒëõøÅ/~C‡¡··&“Iu]&köø²²2èt:TWWËñaƒõøÔét(--U­ ö÷Æz5›ÍÐëõ(--Eyy9Ün·êçaeuu5Ìf3ÚÚÚ|žONNFEE…¬£ÚÚÚ€Þ¼bÝ ãââP\\,‡¨ªª LC-[QQÚÚZ9œ…Ëå’Ã&¨íC;wîT]·²²2ÄÅÅÉà]mßÉÊÊ’¯eˆ)ö×±Á°W+++ÃÚßÅø½ÕÕÕp¹\ªŸUì Ê},..±äææÊá(ˆˆˆˆæ’(ÇãQ>pýúuyÛRbb"Ö¬YÃZ"""¢£©©IöìÄøžyyy#NxtöìYÊÕÕÕÉÞz¢¢^¯GaaaX'‰ÛÓ•=Dsssƒ®‡2 5s½¾²³³ƒŽý*Eµ1>õz=rssG7VÛ­­­HNN–½}EùʱKoК-ƒf«Õê3¶©Úgeˆ»âV|µ°:œÏ}ñâEDŽ:ô=aÕÆTuN˜¯¬;e½ëtº A|8ŸOmk…ÚgÂÙ·‚­óHû»Úrb_©×®Z wÿ$"""šLèëëàíL‘‘‘1eïÍ@–ˆˆˆæÿ@–ˆˆˆˆˆæžé d9©Ñá²DDDDDDDDD4§<þøãˆŽŽ†Á`@ZZÚ”¾7Y"""""""""šS.\ˆE‹MéP‡, """""""""š"ì!KDDDsŠÑhäìîDDDDD4m¢<GùÀÀÀº»»a±X044„… ²–ˆˆˆˆˆˆˆˆˆ(¢,Z´«W¯žò÷ è!‹ŒŒ ܺu ·oßV]èâÅ‹£z“7Žø–É2Y&Ëd™,“e²L–É2Y&Ëd™,“e²L–É2Y&Ëœ eN¦ C¤¥¥añâÅò&<òÈ„¯8Ëd™,“e²L–É2Y&Ëd™,“e²L–É2Y&Ëd™,“e޵ÌxÓ!h ›’’ Ì2Y&Ëd™,“e²L–É2Y&Ëd™,“e²L–É2Y&Ëd™(` Y"Ÿîîn;v Ï<ó òòòX!DDDDDDDD$ÝÏ* š8ÝÝÝ8r䆆†púôi`(KDDDDDDDDÒ>999 e#‡,&$Œ¼¡EžÉc_@DDDDDD4W„Æ@?š››ñÍ7ß°â"£v?ßYûà¨^?xë|~µŸ¡&+ŒD([XXˆÓ§O{ÊEÑ„±B?êêêpýúu¼øâ‹"3°‡¬Ÿ¸ÿ=ª12ÓŽT“Æ ì)KDDDDDD™úúúðᇎ*ŒÐÕÕʼn¾"Y""Œu¹\hnnFbb"–,Yâóo<üËKLLDss3†††ÊEˆÏ?ÿk×®u .Ä÷¿ÿ}ôöö2”0ìÞI¤âW¿ú†††þð‡ª¯yï½÷ÐÑÑ1ê²—,Y‚ 6„|ÍéÓ§9tÑ,ÖÖÖ†žžÌŸ?ÌeˆPößÿýßñÚk¯aïÞ½¾ 0%Rñâ‹/†ü婪ª ñññãz-[¶ 55Uõ9\‰ˆˆˆˆˆˆf§ëׯchh===Rž2”ýÓŸþ„'žx‚•<Ë1%R‘––6éï‘ššŠŒŒ V6Q„xýð‡?œÐÎV .ÄøC|õÕWøæ›opÿýŒôf3Ž!KDDDDDDDD4N"Œ}â‰'&åÎ×ùó磿¿ÍÍÍøæ›oXá³Y""""""""¢qP†±c™Àk4D(ÛÕÕʼn¾f)öo&""""""""£K—.ù„±>Aéüùó±pá {¿›7oâæÍ›øÍo~ƒ¤¤$Nô5 1%""""""""£œœÌŸ?<òàøššš|^³qãÆ1—/Ê.^¼(ÿîééÁ¥K—°uëVnˆY„,‘Š·ß~ÝÝÝ“þÁ~ÁJLLDII 7Ñ —””„ýèGòÿ|ðA¬^½`6›a6›Ç\vcc#þã?þ{÷î••––ú¼f*&&§‰Å@–HÅõë×a³Ù044„›7oª¾æóÏ?SÙCCCøüóÏU—G||<–,YÂ@DDDDDD4 %&&"11Ð×ׇÎÎÎ -?##ƒ•<Ë1%RQVV†#GŽà‹/¾@GG:::&¬ì¾¾>\¾|Yõ€ý×ý×X²d öíÛÇ@DDDDDD4Ëåååá¾ûîí[·X$ÍcŠÅ¾}û°dÉ<õÔSX¶lÙ¤¾ŸcÓÒÒ°oß>ÆMDDDDDDD¡È1U¡,ÃX"""""""¢¹ƒ,Q“Ê2Œ%"""""""š[È`²BY†±DDDDDDD‘­©© W®\óòkÖ¬ÁÞ½{Y‘†,Q&:”eKDDDDDDùúúúðå—_²"ÈY¢0MT(Ë0–ˆˆˆˆˆˆˆhîb K4 ã eÆÍm d‰Fi¬¡,ÃX"""""""7n ©©‰aÈÁhCY†±DDDDDDD4Z d#Y¢1 7”eKDDDDDD47åååaݺu¬òÁ@–hF eÆÍ]‰‰‰X²d +‚|0%§`¡,ÃX""""""""òÇ@–hø‡²O>ù$ÃX"""""""" À@–h‚(CÙ'Ÿ|’a,Ñ××ׇ/¿ürÌË/]ºyyy¬Ès?« ò Àf³…ýúxiii,sŒenÚ´ }ôžxâ Øl6Öç4”Ini¨A{ÃÀæƒÕòqG—g^¬yîgЦ¸“Òœ9FÔ*–wU„`*ÛÒdµ§Ã°.ÁÚ¶r½ϼŠA·S>–S´ƒngÈ妋ÚgTn“ÌüM0äofƒ˜°õá¾;·gÔz9º,h©; ë5“Ï~ ÚtƒwY³ 1š‰‹w´çÒé&êg2ê‚f{ ÷uqº©†èWLÙ~;Ñû,Ï!DDDcÇ@–f%Çvô ÷Þ‰ÍÒŒ–+o"ÕƒgÊŽMèŨXßãNGì]äs1<Ð/è熤9EÙ&LÇÊà X”mi¢ÛŒø$ŃÕE{ÂjÛ`½j‚éXyÀãÚ¥™ø¸îTÐå¦Ëû碥î^øõGA·I {AÎYƒn'LÇËa½j 
ÙV]´\9…gÊ~5á?ŒŒö\:ÝÇ1Ó±rwU°÷0ëU\Ÿ&hS±æ¹ŸA¿Ê8)ëãtØ`:V®z®›¨ó:Ï!DDD£Ç@–f½œ—6K³¼(¶Yšq¹r׌êeC4WÙ,ÍøøÊ›X±~Ç´­Ã[ûŸÓIKCü[ÊÄÄ&àãºS3ªžß?Íçr‡#UŽ. .W¾§Ãˆ×¦`uÑ$hS‘ MÓуA·SþÐàtØPsx6¿|vNÞáa³4ûôô' %^›¢ÚkT uãè²`h N‡ —+wÁ¸«bRz™¾ùÒSc:×Ñäb K³ÞH¿ö;6\zmnÞh‡ÍÒ KCÍ´ÝV•jÈÁÿùÿþ™@sÍЯ2ÎȱW‹Büp#zê¦rŽ%E³ìæ6Óñý2ŒÍÌß„µ»ŽøÆ;ˆ(˜¾¾>tttà3·YD=diNPö65¥ÍÒì3^]‚6ú•6îì Û ÇðDÚ¥™>åŠ÷;6X¯ÕËÉCÂ]ÿå´Ã“šÞÛSú#jFøÁ~ƒß_¿íïÎGYYwöY@›n@Ll<:¯ÕOØÐŽ. líøL¶£_Y ºŸ‹v ô+Æ©Ç ñš˜Øxyk¶xÍÐð²CŠ1¡E›R[NõZ½ ÃÄûŽÔ+jÐí„­ýŸå‚-+Ž5ýŽž€cŒ²ý«=6Ò1%F“€ÔÌïý|Ê㜲>­×ê}ö)›È†ÇU-ÃÃkDÇÆ#ÿ¹ð& Ëß~ÃÛÑfiz·‰ÿöööúûÞ¤wüÛÿhßOÙƒíß6K³Ü¯øü­Övl–ddЯ,»Ý º°4^À Û‰m* ù›F<æ…j—Så[W/¾îøÎuFãQÝRlݺ•m+Öï€õZ=z†¯A]– ÛÑ»‹ãijæ÷®Ã9×tŽí~å¾µÏOÔ{öåÿ¾Ökõ²†ºvé;C°íÀöFDDÁ0¥9ÇöhÀÛk¶ñÌ«>_œ„FM–¯+™‰7Úåøs›Vû\+·^«GË•7U×ÅøBEЉμªº\‚6Ï”ý g^E¥9E{&|b‡iûòw»}·>îÚ³ÊÚ]Gð//åch \CˆÉJÔ&þi<ý ô«Œ0¾PáóI´ÁÒP#{û‰[øÅkR 9rÿ±#]ù˜hSjË…s¬in§j» ºh<ójЉ͊6.¾€*5þÇeûW{,Ü÷M5äÀ¸«"`»)ßû…_tÂ(ÿuæ1bêX¯šä¹Ð¿9ì!A›ŠÍ«~PT¶‰Æ3¯ˆkùúX½ù¥ ,Bµñ~ùŠÞ‡þ, 5h<óªêuAª!kžû™Ü?ýÛSãéWäßÊ¡?Bµ›æóGÊUk7;ÞxoíÖg½]äo?0l> ЉÄÂ9שëE;½uÅÝ ¡&ÆãKGÇÆ#Õƒmª\7q®PÛ'ýÛ‹XÖÑeÍÒ¬xïÀq«•˦(îQž£ÔÚ‚Z;U¾oçµzÕk±-.Wî zèGóù£t;}†K#"¢¹i€4Ñ©©HKK xžl„;wî>é¼ÁÛC˜¿ìiÌ‹Kš“õÐxæUÕ‹^Ñ»F\„nØ{ÌçbÕÛãí´7\€¥¡‹—>6é³Â;º,Э,ÀÚ]G|.?¾ò¦üïŸ?ê3ÞŸ²×Cfþ&ä?wÀ·G b2¢™rŒÐ¯2B·²`ÌC˜Žï—û´ÿìÔ«‹öÈTÌ`-¾HŠ/Iÿü“Gx{†Ó›Eôx=¨=–fhÓ aOâåtØdû]¼4E«}Ú¨cçL˜di¨‘_ƒõb}÷Ø>tßri½j‚~•Q®ÛûçÊ1 G3áØåÊ]ò}×<÷3Ÿí²ºh¬WMx÷Ø>o˜~|¿jà%Žgjëýî±}ho¸0|«©iÚ&Zœ•••èsA÷7ˆyâo¦eü‡Ë/pˆ@fÃÞã>åæo? ÏCŽ. Þ?tÜa…hÿѱñX»ëˆÏ¹}uÑyÎï§Üm–fƪ3]ÙCµáô+ذ÷8ŠVûcùÛÔ]ÙW‚¶ñ¾—^{A¶ó`?FØ,Ír½ÀzÍÛ®•ǃ¿ýùo]qÌt;ai¨™ôë¶·‰µxi&nú 5À燼¿{£!`ŸQî—-u§åñ4œsÝÇÃmRí<*Úö¥Ê]2ìw:lA{ºª]G‹v8èv¢áÌ+×®¡Ú‹ÿùÍfiö ]Ųϔ hÿ¡Ú‚rülµó“h§þ×âó;>çÄùCÅÞƒêNauÑþ(BD4Ç%ø¯QQHÚ²<öXÀó¼·!Âuww£»ËŠoowÃóõ݈üŒb 'µ×›/=%{Ä+zˆ‹2a­ÊíT¢_üp²æš7&ýóÄkS°aïñ€‹¸ëwÈõpú]¬7ŸC^ ú¹â¢\7Eã5£±v×9ÉWsͪƨñ*Õ=Cþfä Ñòëmª}|åÞ"öÞö©M7È/ÏN‡M~6q«¼6E54“ÂÈ IÑóq¬DÏ$X¾®D5Ôѯ2Ê÷utY‚ÞnšbÈQ]oå˜¥Ž®ö9Õæ:::Ð×ó)¾½Ý=mëj,õ±°4ÔÈý{í®#ª!¯ò<ÔRw*ì¶®FƬ.Ú£ÚËwÅúÈwÕÿIªÄkSÂXÑ—¯+t*Ʀ Åé°¡½áoï`µv“jÈñiçÁÚò\£IÇ7å¹_­w­!3t+ doxší-\b?T¶ G—Eþþöª_ª!G¶-ÿ1ÆG"öÙCŽêy4F“€ÃmAíúSðþs,à:ZÙÅpBKÝiÙÕÚKŒ&Áçqå9|¤^þ†üÍH1äx{¶+Žwƒn§ü1F·²@õü”ª8où_;ˆ1´õ«ŒÛÂû¡‹—f¿/;AQhì!K³žÿ¸nÁDÇÆcCÙ1ŸÇÄ…•n„IVí‘¿´+¡Ÿ ú•ÁoMЦ¢ßÑãÓ{Âé°É ÜëKB~†NÅ$+D3å èX†.P~AR~Y eÖ•È¢Ö«¦Im»¡(5Á†GЯ4bð9'´éùš {ø{¢ÇŠTNÆ꘲bý¼þ(†úa½j Šo ºÝ£cã14Ð?!!2M/±ÏÄkSB <Y¯šÆÜƒÓzÍä³cÈߌö† òÜ­_eÄ Ûy/TYi ÚƒmÅúhÓ3ßL1Nr¨÷†üÍxx$ëµú ?x¨¶›á¯oÏFµc߆½Ç¹CFmº;ÞxNG‚ ØëÆr·ã÷`³4‡Ê&œ^ž¡Æ_]´G¿Êvÿ·?ÿ-l–fŸýÚ_°s¶rÿð‚ê8îa;V¬ß!ïjQ^;ˆ^Ì-u§T',Ó¦P\q‰;î8utt »»}}}èS±xñb,[¶ ©©©ˆåÆ ¢IÅ@–"žè»b]IÀ-‰¢×ËH!òùÉdC]øj—f¢gø–1åú(ŸŸªÐ†h¢Œeè§b\ÔPm&F“ ¿@Mgð'Úl¨vèß(TÛõþÓ#Çè›H¶QŒ­«M7 ÇÒŒžöTŸµ¼X–f¿ž3·Û—m–æ1²7oüI–ªç»²gœãF;ô«Œ>ÇPë› M…!?uÔí&œómª!í ‚îÿÁÖK¿Ê(i<ýŠœ0I·ÒˆTÃ÷Æ41"Í| ÚTÕmëè²ÀyósØ,ÍèT™81\jûÛ Û Çð8¬á§GjKÁÚ}¨÷vÜhúÞ⇠1f«7 5%¼-¨Ïj½[@o\1fï[ûŸ…6Ý€Cô+ ¦íßHÒ×ׇS§N¡££cÚ×%11Û·oGFF7 M²4ë…êMl&hÿ/j#}yš)_rÔ>ËH·0*‰`Šh¦Y»ëþå¥|ï—«š7Bö¶0ªýX´›þ ·ZN¶P“ †Ëz­WMÞ/©£¼%uD»ŽŽ‡~¥ú—¾©’D„$b|Ü`_ÊźŠ[7åg ÒØÑeAsÍa­C¸_ ù›e`ÕxæUÕ/àÊÏ?­a7ãogŽ. jo ¶Ø,Í>Ïëüf'Üã9Þž¡ãßTÙþMÇÊU×[Ùþãµ)>áÌŠu%ò5_ySuÿV.+>«òÿ÷Ô¦d×–ºSÁÛÍñýŠãBÉè¶Ùð1¬'DPÜ¢¿¦a(‘yñIˆ^ñlÝñSlݺ• -Ø—q·Ökõ¨9T,,ˆ×¦ôRUnÃ`ÇpÓ±òÇwÑVü_ã?4‚ëU“Ï>l2>›¥ÙçPe¹bùCŽl÷¾Ãm?¿µùqC´EG—%h[P®¨Ge;m>TuY1ä‡ÿñM»4SÖe°óî Û)ïÜ™Š;ê"¡½)ŠGϬ¹.I¼•ˆ^¼4u>ƒ7xü=€óþ§Çƒÿà—¸´^Åä¬ĦxÆõoBÏŠ·À¥á:¨Â½ú’ǃßÃðÚ¹ûÌöáíó‹ýWœ;w.àyY@sþ ifþ&´7\€¥¡ŽíX¾® Ú ô£óªÉçB9ÿ¹3òsò7Ãfi–ŸÃfi†6=1± rVöèØø°Æ„œmî{ða,NY„Œ ŽëI”C¨QNætØðÖþg±|]‰ü‚i³4£¥î” †6ì=ðcŠ¸Íº¥îúoz{ë_¨˜”]R 9r|\KC œ–¯ß˜Øx8=>NþöˆÑ$ F“às[y‚6©Ã8n´Ãz­-Ãa’hßj_¸Å×ÍšCÛ [eDLlüˆ·¦n(;†·ö?+ÇßËÌß,{9n´£ùüQY·ùÛÌè! 
xŒÝÎÎ*ÆÍáóÄ[ûŸ…6Ý ƒÚÁ~9|…²Ý({±{ÃŽT¬yîg2Àkÿ³X¾¾DþûêÚ]ãZïMò·A”ØGS 9ˆ‰hÿþûûŠõ;`i¸€›7ÚÑxæUܼñ'Ù6œŽ4Ÿ?*Û’ò³* Í5oàæ?apÀ‰gÊŽ ¿¶çcÐí®ƒ>í¦åÊ)Yîšç~6êv³b} , 5èÇåÊ]>írp -WÞ”“÷ZfêÛeÔý p߃#í;K–;§Û×Íí¨9Tìó˜ÓÑ£žFÇÆcCÙ±€ó~¥Qþ ~¹òEïð™ßÃà€6Ëh¾æS^ãùßj¯M7 gø1 ÞÀ+§è%0 ô{ƨ(¤fz'ë“„Yjº~ŒŽ‡éX9z, 3Óˆí^ÜÝÕ?|þ‹Ñ$@¿²ÚtÃð°õho¨Á Û)ß_ùC[Pžÿïc¢ÞüÛ‚h§Cý¨9T Cþæ í?3“Ï+Öï@óù£ptYðæKOÉá…ĶUž§¢·z$´·O>ùD51߬ñþ}ýúudddðâaˆ°± €ÝãÁ Ú £˜Tˆf<|o¹ei÷^“‘6b ›´Ï;pèvxÿþjèî{äß_ EÁæ¿þÔêg!¼Ã,…wL[0ó0%~!Ýu1± 21 ßš¤”bÈQ½Pž©ŸÃé°ù\ð{¿@Wà­ýÏrƒÓ¬  \ƒ^$¯2¸« §_Á Û‰æóGѬö%wïqÕÛ1 ù›d/8Ñ ÞÿKÝDÚ°÷¸Ó.Ø­Ð9E{|zò)¿8ª}¾xm 6”Ãûç¢óZ}Àm«Ú¥™2Ôu:lh¹ò&RÂø|Út6¬Æ¥×^ðŽoxåMùÅZY·kw™Ñ½ciävVt°ïŸ?*{¢)o©÷·|] VíQ=ŠðAôXo<ýJÀkĹh<½cïµ_o;í?Ø>š¿ý€ê­ËE«q©rz†{öù÷îËú–¯+‘a¯ÿ2Útƒ,·ßÑ3êu‰˜ÄL„¾jå‹)ûîàÓLŒ]Š¸Ã ÿ¹ªíJÙqÀÑeÁå×^Pm—úUFþz'êSŒ1¼²@) öÙÌüMH5¤Êóì Û²Í¾{¬\þp£fuÑ|\w*h[*:XðÁ†²c¾ç7¿¡µâµ)x¦ì>¾ò¦÷ü¦£Ú¿-¨Åõ»ÿ2þíTmEùÿø´ºhœÚ.Àé°©~gvì "/q ÷ 7<ôú‰#¯Ä©‹=HKbDÉÕ7`šÑu»Àw}—?z½E€+ÂÛ>'pó¶Ÿô„þœw†ÿuˆë¾á±kÓá h9ñØôc á¶nÝŠOznábKæÅ'EÌç2äošÐ üí°b} >¾rÊçÖà„áÛƒ½—XÿÛS 9€J´m r†÷_F<ês+×ÿsxÇvì‘!\—' IDAT š Lj`û~0úUF¬yîgòIµå ù›¡_iô-â7ÖœaxÆè`?¤¬X¿ ÚTX¯™àtô &6^¾G°¶=žçï'†üͰ^5ùk´K3±b}`6mºÅ—`q•Ç’TCŽ uV¬ß!C.å„B"pSÖò긓jÈÁ᫥ {Ý+oUÕψ­V·¡Žs£©«HUZZŠæO¸Úåšë#z›®X_"Ç0îm-öGï¾¶iÄ—+Öï€~•1`QÌ?çÒpÚ¿~eô«ŒA×Y´ q«µÏ˜–!–Íß~ÚôLX¯š08ÐmŠO]iÓ ²Ýˆ×Ld»Q–ïß.ÅùžAÐô¶·p®Qcbã¡M7„µ­Öî:‚TCŽÜŸÄþæÞûOLl‚ê¹ÎÒPƒÁ~Ÿáô«ŒØñÆ{xÿüQ8=òüáŽY]´GuèñžúUFò7ããºSÞýr¸œPû¼8¿‰ko±LÌpH­üÑBíüæÓÖ†Û¿rýC]¿kÓ (þù%Ÿvî²â<.ŽÊu õy‰æ*1æk—ǃϢ¢pGùdˆðõÑbiIQH\$&(C̨9U‡Ê×?¼a­Íáýûúgô9¿ôÖ‘rè!ÉãÁÒ¨(Ù‹vwÙ)åñxн{÷θÏ÷‰}¿|ï îíÌi56K³ì=±ù`uD}a[kX„õY¼™Ç¢ÙqŒ¸ÒÖ‡w-·¹a(býô©%xt†ÜBÍöFlo³ã{øÿõÌü·~o4}-‡,(--CÜPÔÙLõ€ëðö̼ÆëSµÀ²TÒ’£‘æ _iüº{›÷¿×?󨆴þ’<<…e˜Û“ÇMX{P=ü·Z^ȲD@ E°|]IÐÛý{×W€ë:õ1`Mñ ãaïpiZoйÖëu*¤%yÿyE¡Ï \ïºítØÔǨ튂 € Þ@v€lpüÙÉÂ@–(ȉ/À¿) ·¬¥¡FŽÍ•bÈ™ÑðÑÌvÀZÅD\*Aì1ÞÞ¯G!U+n½gø:€¼ïøî½á”=hýǤ½àêð?Ñs6 Ö`"1%ŠùÛ F1³sª!‹Ó r;áPLÂ0³[ÑÔijj‚ÙlÆ]x{,fO㺴hVNÈåÄ>ïÁòG€Œ‡£†Çé¼ÁÛC˜¿ìiÌ‹Kb¥DÓ¤²²}î¯1èþ1Oü +„ˆíˆhƸ Àäñ M1Fì1À†<àé'Ä’Wì±Oo¿˜ÿ·÷ñÁ¨(TxÓ;ñÜL‘ à¿FE!iË<ðØcÏ3pÝÝÝèî²ðÞÖBDÄcÑôrÛÑLrÀY½Š0öq½%?ŠâФ*v·ÇìÓO§þ`sx¿ àKk£â‡ÂÁfˆˆˆˆˆˆˆˆæ0“_»å¿/2Œ¥‘¥%e[¼¼ ×¢¢Ðʪ ‰,ÑÕø S°}¸×#Q¸DoYe({ÞÉáH‡, ˆ`:V§ÃX¾®úUÆ9ñ¹´©ÜˆxŒà1‚ˆˆˆˆFí.€«0Ènù/@ÞwY/46ÞI¿<ø£Õ»?™lcµ¨b K³žÓaƒ¥¡Fñÿ=¶864žywÝN¬æN@ÄcDDDD4jÀ;<ïáä]4n[…?z§)ÁgîXÈj À! hÖûøÊ)@¼6€7ˆ°Yš#ú3¿{¬Ö«&n|"#xŒ """¢1»áñÈ¿·þ€a,_b‚ïÐd•¨b K³^{ãÀŠu%2pQö†#"#xŒ """¢Ù*//¥¥¥(ðø—Ý«ø;-‰uMC¹/Ýau¨b K³šõª ƒn' ÕýJïmÈí äãDÄcDDDD4[%&&"##K1ñ·~*&óJL`]ÓĈQ ú©¢ö\rÞ!þl³¡»»;àyŽ!K³šèå¯M6Ý€š´Ô’Ï­X¿#¬r]X¯ÕËÿׯ,€6Ý€A·ŽííÒLÄhÂZ>A› ýÊ‚¯èG‚6EN¸ci¸ 'ŠÑ$@¿² `2§Ã§£Cý€¡~yëµÿú º°µG—E>¦M7 5ó{A×k6Š^ñ¬JCÎwxõ@н{÷ú<Ï@–f-§Ã†Îá€CôzKЦbñÒLܼю–ºÓ#†-N‡ ¦cåãI6Ÿ? 
ý*#2×lÂåÊ]€Í«‘jÈñy£ËÓñý>†Ð¨IÀòu%X]´'ๆ3¯¢ÇÒŒœ¢=Ð.Í„éxy@o½ÆÓ¯`ùúÈîgò1KÃ4Ÿ?êóþ5‡ŠÖÏÒPƒÆ3¯ªöŒ ±^³Ñ}>ŒÅ)‹‘‘ÈFA7èvâü¡bܾÙß Û)ƒ–èØxlØ{ܧg\þöx÷Ø>´7\€¥¡º•ªëa½jB¼6kwñYÞÑeÁùCÅèGKÝ)Šò7ÿç£ÇÒ mºE«}Êl®y€7¼ñnuÑù¹”匨û#çg.›¥YŽ¥¨ìùxo·Õ­,t^«—¯SrtYdï¹5ÏýÌ'he¬–3²ûû¸î” jüƒaí®#X¼4dø£Æ?h¼ã8Šut;CöÂSûlT×)F“€ÕE{bÈAJæ÷Të†xŒà1bbŽ¢\#H)##‹St¸ïÁ‡YDloDD3ŽÙ…SuVÚÀ] òmlúá` K³’˜¨¸76¤’2<±4\x^9¹ŽÐ"x'ÍQï9'Æ¥·0cÈß@L´lDÇÆ]>X¯½‘Dß–Ü®˜È¿Ü¢ƒÕذ÷xÀ„@D3ˆû‹:œ, 5ª½ÜB-Á(g%·Yša òþþ7ÚÇÜ£m4Ví£Ë")G—Ž.‹wòŸá}™ù›BöÚ#â1‚Ç#ˆˆˆˆf†¦¦&˜ÍfÜ=üo2lýAÒ’ÓÿËûÿ_ F¡êm`Y*P²HLà¶ {Zþ ¼ýo÷‚Xx(Þƒ £00Èú Yš}~x¢ øm¾þÚ.½íx´”ï¯M û–Þ„ cMN† {ÃÑe¥á‚œÍ€œE]„OÁfv'â1‚Ç#ˆˆˆˆf†¾¾>ttt&{Tì¼ïiZàWµü¥ßÛ¡Ãü÷ÿÈ5x°á¯¢ÌÎõï[~÷¡w¿Pz\ïAÉ¢»ìY²4«86y+pfþ¦”†Ó¯àæv9Á024Ð?êuPÞBlÈßI>­ï¥µ$Yò |’|ÿ®ËB’eéÕZ¯½n=ëy•Ó—½•í‹c¸P€kNÀóÏ?Ï„sçΜ#ˆFIws:J È) DJr"²³³9(DÜßÈ ib€Ÿÿ£}®t:z›½Ú1婢¼«RÅlÖ<`Î4ŽÕxÑj nú\׊X@ªŠÍ^&`Î4.æ5 dÉ«|ùÞaåòÜ›úí¯8÷ñMøè×/)ßûÈÆ ­.^s5õWak±ôzZ²¼ÊzO!áhomR‚Ÿ^ƒ€ l­–_§¾âªËب¢ã1÷ñïcîãßG}ÅUüþ'«¤×÷×½>lé6a2&îÄ9‚sç¢Q%vXÑm6Âh‚'p© "îoäít™@æ,)„Ëû›ãú’jéK­’BÙ¬ylgà«.\.qù†ÔÂÂÙÄ ë@Ö7¤ªXÎÜäU®“ß ŽÐb7<ä´pϹw®_®\v^Ý™óÂ@nkïóXö×û<ú“·~†ß=·ûÖMë7˜ W?ù#ö­›†ßÿdU¯AQôÔ™ˆcoHâÁ9‚sõC­6=ìyXô ëm&‹TA»ó ðïo‰0|á^=IÞ§¤Zê »ó ðë?I•ÑÎaìÄ `å"`Ïf)´gëY0(â¸8h4·ÛÈú¸œœäüŸý°}q ÝÍu^ýZn\r,Ô3wŦ}OP¨J FäÞˆ=‡ü?¾î¸ØZ,8½÷½)ΧAŸÞûC—E|dõW•ÕÛãff Ëjé=O§~à!Ç‚@ù|Ýã÷ÔW\E}5õ‘ìYIœ#8GpŽ `ïÞ½¸pâ l_ã`q#"òÎÁìÊEÒiêΪë%ÄÛyP ôŒu7oÐj•*a¿/bÛ~{sà1\6>¼¶•Aì@ÄØ xníZmeزÀÇF+nNkñfÎÈ`V"ŸùÈw”Ðãòûo*AÉ£?øO¼ý’¶ >úõK¸üþ›ˆ›™¦újT_͇­Å¢œvÜSüÌ ÌY± —ß?ŒúŠ«øÝ–bΊMʪäÕWóqùýäS—yz׎Etâ åtêßÿd¢g`Ίˆž:Oþùÿý+éy=·OþH Uz>¯±ÚÛ’8GøÂá²/rŽ ;yA"âþFDäÔ*)ŒÓe (6J­ œÛŽÊYÃÒÿSãmªâ£ä6“E ËKŒ@±QDu½\ýêÞvr¸ˆ9Ó€åq1·¡Æ@–¼‚¥¾Z958*qÆ ú-ÆÏÌ@xtšêkP_qU騊ŽÇ“ÿóm|òÖn%¸p>exΊM U!ÿ¿åñqÙø/Êí¶ òÿûWÈïqŸÀp<ù?ßòÊ·æK Mäç-Ÿ¢½ðÉÁR_kŸ¼ K}5>úõ‹nßÝŽÿ=â}+‰ÆÓ!‡™œ#ˆˆˆˆÈi5Ò×Ú¥ÀåRÀwù+Ç"`2¹ç¬ò÷w´­F@ªFZ@ŒAßð2ÖÕõ@q•ˆ’j¡Gåkï!¬6 u '²äl-dØÃø»èmøè^Aµý\›S5[ôÔ™xò¾úŠ«¸as‚BÂñÀüoA‹NAKtâ ·Ç]øä0ó‘'põ“w•Ç—#~ff>ò‹Í|ä ÄÏÌèót`Utœòš{Þ/~f¾û§ðåû‡a©¯q—Gð f>òܸôê+¯ øyqŽàÁ9‚ˆˆˆˆ#$È|Púì A}%€·›Ü½êzÕõŽ Zµ H¡‰ ‰aí½0ÖmíR8n¬“*`ý_{W㣥ÚæL41 aGYò ÑSgÞSYüÌ —0¢çÊæyüÞ Utü Oëuî/Ù›þ7zêL<úƒWüš‰8GpŽàADDDDÃMöA9=^®Î¬®w¿¿É"-•×cÛÔx)˜ÕÄHaH°tÝx×jªS£4vÅU@«Í¹õ€3ÏájJœm‚½J9šøh` KãR}å5üñÿûà»ÿqª× ÅyÁ"âÁ9‚ˆˆˆˆhàÔ*éK>õ½Õ ëíýK«D”Öô^)·9¸ü•çÇÔÄHA¢Z¨#\oófr•«rÙ4˜E˜,ŒõίÎzljAR²6A@j¼4n}ÝŸFY—œOïÍÿãëøÖ?ý§[uÛG¿~IYA}æ#OpЈ8GpŽ """"º!ÁŽÞ³ºL)4ÖI᫱N «·>´ÎLé˹/­'Îá¬Z%"*Rp\Ñ÷÷ÞM%®\¹Ú9`•/·Ú×Uî=h•õßvú¿Êaµ&V€V#ر†,KªèxÌY± —ß?Œ—>BõÕ|ÄÏÌ@Tâ 4T^SVP€<1 Ó‡‰ˆsÑPËÌÌDjj*j_}‘>øú41rÕ& ‡r%­É"š_\%¢­Ýs˃ÞÈÁ­óãŽ]ƒ{~)q¢½ƒÔwwbr3xõ diÜzdã¿ (T…/ßûl-ܸôn\úH¹=0$sÿþ {?ç""""¢¡¢V«¡V«1žÚ|Ê•´2¹š°W–Ú¤¶€º6˜Eå²§…ļI|401Pz=R[!ÁÒõŽ~¯ ^Ç:+€:–êjD††B£Ñ¸ÜÎ@–Ƶ…OþsWlBõÕ|—•ƹØ÷œ»ó§†!#‰+ÂçâA4šüÂc8wÖÌQ#%n„ˆûѰ«iµ.ù–{@)·P]ïÔ NT.÷¦¯Þ¶½‘Ûô÷ÜC‚åòÄ ûõn k1põvµÞ€wÞAJJ vìØár;Y÷‚BUx`þ·ðÀüoq0¼ÿ¤DÅEB«Us0ˆsqŽ EB@0ü'%@“t?4±!"îoD£ÊÑ¡ÿðvˆöL: ˜‡€ˆˆˆˆˆˆˆˆˆhd°BÖÇegg£´æN\6Á/<†BDœ#ˆFÑöíÛ‘_nÁ¥Šf÷7"""§Èú8Fk þ•¡ "âA4Ê´Z-n´›àßhæ`q#""¢qŠ- ˆˆˆˆˆˆˆˆˆˆFY"""""""""¢–DDDDDDDDcTnn.òòò`fÿ""ïÆ@–ˆˆˆˆˆˆˆhŒ2™L())$p8ˆ|Y"òj¶/ŽáBq®}8Ï?ÿ<„ˆ8G’îæ:t”Sˆ”äDdggsPˆ¸¿‘ d‰È»ÿ5a2&qŽ Ub‡Ýf#Œf x—ª âþFDD½a KDDDDDDDDD4D‚hDññÐh4n·3õq999(-«„ÍÜŽ ©Yð ‹á ç¢Q²wï^˜Z:`kéDÐ7Ös@ˆ¸¿‘аA³v-&NŸîv;Yg4a¬¸@:­…ˆˆsÑè‘ä "îoDDD4~±Ù Ña KDDDDDDDDD4BȲDDDDDDDDDD#„,Ñ àM™™™HMMEí«¯"’ÃAäÈQjµjµÁ "¯aPÀR]ÈÐPh4—ÛÈÒ°è69ã˜!X5"?+pî:ÌŸ†Œ$žsqŽàáØi…Ø\Ï o<ïsaцÿ0Þ/<s×aÍ5Râ&qà‰¸¿[µÞ€wÞAJJ vìØár;Yºçƒ¼î†¯Ðu» ] ¥@§ƒB.ü£Sà™ÿè”a `ü'% *.Z­šƒÍ9‚8GŒû9B´ZÐm®BW})ºêK¹‘«€ øO’ö7¿¨iCÒ ÁðŸ”MÒýÐĆp¼‰†÷7""/ÿ³ŒC@w{À×Qv]·Š8Ô'9è(5À/Rƒ€¤oÂR†sçˆ!ÞçÚ¯þ_VŸSß:m.a½t &¤dXÅ:IÈú¸ììl”ÖÜÁ‰Ë&ø…Ç Íßò¹è¨ºÔg¥Û“9öãÙí6àN›ûõÝf#Ú¿ü9GpŽà1nçˆíÛ·#¿Ü‚KÍCòxb§%gúüð#8ˆãt;®ÕXk§ûõr8ëß,LH]6"m ¼y#"¢±íßöøñÓ³© òxŸÃï£òf3~ºuŒFY§Ñh` Tÿ2tHúl_sëC7E% Cã‡ijq*ƒNŠÛmÀW ݸr«Eµ¢ëà*}c=üÂb8Pœ#ˆsĸ™#´Z-n´›àßh¾çÇên®Cû•­.×ÏŠå‡Ù÷ùaòDngäºÏÞêFiC}îVº›k8óÿñ©}n(÷7"]¿óÎݲÇ[÷¹G’üðH’j,"Þ-êÄÛÒmbs=l_Càì5lBDD^çð‰D„bj\8ö½YÔk K4V0¥~ÉUoÎ}kôÇ#I 
Yhpß›ã¥É~x=·S9u²ãÚ{&Á?*…ƒÄ9‚8GpŽùYpð½9˜}+ÐiàâTžËœ€ÏŒÝx÷o]Ò>×iC{á žÁBDcVDh Ò’¢<ÞVYׄªº&R¬'WÊÐØÒÎô1f‹ 2T`é‚û±4c þÇž<œÍ¿‰¥S884f1¥~9-ÁÀæù¬x£{;üiV^ÏëÂM‹tºdûÕ÷xðÇ9‚ˆsÄ(€ØMšlžÀv t×hü!8>é´¡ýÊ -Øès=e‰Èû¥%EõÚj`÷±Ï°ûØ%èõ>CÝú`¤äææ"//Viö/r8ùqÌ–v¬^>KHìko*=›ož,AEêG†aõò©Ø¸&Õå~ÿ¶ÿs<2ÿ~˜›Ú±ï­BL ÇO·ÌCEM>¹ô56®NÅ'—¾ÆáÅ€9Ó£ðÓ­ß@¤*ož(Q®_º`ŠÇ^·ož(‘^O“t|µ:Kz½õÄ%ïÅ#fêSûÕ÷”~ÁÀs™ ZèžMœ à¹Eþ˜"6´_ý¿;­ÎDœ#úÐó—0Œ¥{§ðâ#l/Õ­.UØDD4ºL&JJJP€]±Ýí{«áX½|*¦Æ‡ã‘÷ãO†JTT7 èû¿ÿÏgñwOŸÆ_òo"",¢ü%ÿ&6ýä¬Ò—VöòëŸãåýŸcÍ–q6ÿk~·pö³¯ñòëŸcÍÖ±é'g!ŠÀÆv¼öf!ÖlýÛvçâÇ»s!Š@yu“r_gÛvçbÓO΢¼ºIyÛöäaîêwa¶ØøFû5S¯ºJ]Vm~âAôÑ™8AÀ÷æø;þšëÑQræ®Â‰ 'bïÞ½TÎÄ9§爎² .‹æ=—€‰¸ÏÑИU©"Ýd¯j=|¢¤ßï¿|­‡ß-Á·³Qqæ»8ù_³Gt¨8³áØ÷V¡Û÷|òÙ×ø_;A,þG|yò LWn+¯n—'ŸÀÙ#:\þÓwðÈ‚û¥àöD .ŸüŽý±¿‹og%âlþ×.¡ñ¾·Šðí¬D\þÓw”çñ¿v.B!Äf) IDATEMÓ€^ y²Ô«Îª¿*——L°@ÃÍ…†VœJp9øëºU4è ¸n³¦šr””ðçâáÛs„s8¶†€Ð0˜¦öÃß§øyœçBì°¢Ûl„±âŒF#”hq#’¼ö¦˜nrj-°zùTD„âÍ„˜‘áAøéÖyxmg¦ëõª Ì™¡†ÙâÞs81.LY4lΠמƛ֤º\·tÔ6aÛÆÙ.Á­|¹E‚\ÛØÜîR »iM*þòÖJ.RæƒØC–<êºS…n³ô‹=8@Z)h¸þ˜Ü¥¬òÜYõWLH~˜Ã9‚ˆs„“ί‹”V“&‚‹æÑ°Yšì‡Ïª»q§Mj]ÐùuîŸÅ!"¢1éÍ“%ˆTB€O>sôž;S³ù_ãäÇX½|j¯ß?5>/?7€T-[y³—¯™pùš ×Mž¿'.¼÷ߣ½ô­3CÝçëˆT)Õ´IYǰtÁ,]p?¾5•‹“y©`QD`|<4Ûí d}\NNJË*a3·cBjÖ€DéúÚqòìûüxJ$ « ?nÜî’B‡êÏÈrŽ òÙ9bïÞ½0µtÀÖÒ‰ o¬ð÷u/9þÐOæ 4|&N° Þ.íV¶=o dïv#""ï /æK7œöxŸ}oöÈÒB]¯½Y¨­Îíz>ÆË¯Žôéjüîçó0gºZ¹ïÒ zTÖ4Øë‰Táåçæáåçæ¡¢º ' R({6ÿk¬Ùú!þò–ŽoºáQ4¹éºS¥œ9E%°GˆÙ±Ží¬«ŽPœ#ˆ8G(¯×éY±+ÒiØMž<0Ùi¬ÿŠƒBDDcJEuþd¨Dút5V/—Nëïùµi€£Ï¬'òbY'<ŠÕ˧º·#Æ^¾Ö€ïÿóYœÍ—Ú.LǶ³qöˆ‰qa8›ÿ5ßtÃ@–܈V‹ryb€È¡¡uLG]wª8 œ#ˆ8GÈû\{›r™€ÐH™¦vìsb›™BDDcÊIC`Ó©½ÞG^èëÍ“½˜/÷ƒ½Ü£_ì÷ÿù¬²àÖHˆ ÂáwKð?~žç²¨WEu*kšñÈ‚ûù¦û²ä~àçôG·óãDÃ)NÅ1àAÄ9“îæ:årH Y…y¿ÓÊA "¢1eߛҚrèêÉÔøp|;+fK»ÒÞ §m¥>ék¶|ˆ5[>Ä÷ÿù,’–É*ðí¬DRõêp›ŽŸn‡Ë×LHÊ:†¿{Z¿{Z¹kþˆˆð@¼¶sßtòD4&¸œ‚kkâ€ç;çþÎüðŠFJJ”€?Û»et7Õq@ˆˆhÌ0[lظ&SãÂú]èjÛÆÙ˜3#J¹ß¦5©Xº`ŠrûœQøòäxíÍ"TÔ4áŽÅ†oœ…MkRQQÓ„93¢\ï§[çaj\˜ÛÏYºà~`ë<¥âön®ù¹yXºà~>Q¢Tçn\Šmg÷Ú—¼Yrã|*hJ+qhd8Ÿ†+Z9 œ#ˆ8GÑ€íZ¿»Ö/èó>iIQ.ÿ’w’¿¹Ÿ¬lÓZ·ûÌ™…Ãÿ±ÔýzU[ ÛÛÏíùs†úzò= d‰ˆˆˆˆˆÆ±Ó ±¹^ºÜa…ØRïñ~]·+û,«¢µBp„àþKùý''z¼^†0!Xº ! ˜o^Ùü0[ÚxOó‹g{åëÏÌÌDjj*j_}‘܈|Y""""""Ñm6º¬bG›ÒvÂ9„¢µq@g1t›ƒz\çpÖ/<„‰Òõö×/RÃ7ÞÇ¥'GßÕ÷™›mˆ òúׯV«¡V«Á(ˆ¼‡@Ku5"CC¡Ñ¸þ®b KD^-pî:ÌŸ†Œ$6V$"ÎD£É/<s×aÍ5Râ&q@†‘\‘*…¯RàÚÝ\tÚ|óõ6×C´_î5Ì ‚_XŒØúEj\±ËýÍ÷œÎ/ƒþb9Ž®£õÔ¸ZoÀ;ï %%;vìpýµÅ!""oæ?)Qq‘ÐjÕ "âA4Š„€`øOJ€&é~hbC8 C@´ZÐm®‚hµ ëvåV¸N ⣤˜3$Ðĸ÷… âP˜¨U€É4  Íwu=Ðju¿¾¸JT.—Ö ²O}§ Ýf£ÇÀV‹†ßÄHø…ÇBVÁ/2ÁëƒZîoî*k-8zæ:Ž®£²Ž‹ÑØÆ@–ˆˆˆˆˆh èn®C÷#ºÍUÒ"šwQñ L ¡Mpš©ö³$C‚MŒó½‡vqNµJúê¶—ºLÏϧØ)c-qº\\%¢­]@u?µØ\®æztÕ—: ÁRü"à7I¿°n€^ÈÜlÃéür=sç k\n‹ „.#™ƒDDcY—Òš;8qÙ¿pþ‘ADœ#ˆFÓöíÛ‘_nÁ¥ŠfÑ8ßßäj×n³]·+ÕWur¸µJ WC‚¤jUªÁ§ÞKç×ù²s€+Wç–V«cPÝ  ­·L»Ó†®úR—Ö/Rÿɉð‹Ô°/íWPVÿÒ_þb9Ì-®oòÊŒ$lÈšÝB†±D4v1õqÖ@5ü+C9DÄ9‚h´C­7ÚMðo4s0ˆÆáþÖm6¢«¾]wªÔz`b/B#@#ý_ $¾Á=ÈÕ¹žÆ§Ø´Ùc`¬QRí9¨íÙò@‹†ÿ¤øß?›´c@e­§ó˱ÿT[K‚´¤(lY•]F²O,âED¾,Ñ0‘«0»JûmA09\j5ª4Ñr{†¯÷J®ª3Í1žÆ: ¤Zú·¸JÄí&÷q›ëÑÙ\NãçR‹ƒ¨øGK_4²^8xôW\®KKŠÂSYÓ¡ËHBb,ï$"ïÂ@–ˆˆˆˆˆhuÝ©B××Eý†°ñÑR¬6Aª€U«¾ŽMŒs?]&‹#œ-©öЗ¶Ó†®[EèºUägïŸÿI Ìp¥¼A¹œž…ÿý£eHOŽæÀ‘×b KDDDDDtD«]·ŠÐy³¢µÑã}&‡‹˜3 Ð&öjM€°cƒÜò`Î4éýhµJ´—¿òPAëÎ Á˜2þ÷Í‚Ì*Íá’£ÂyÜ”5`ѶãÐ-L‚.#+3’|¾MAnn.òòò`fÿ""ïÆ@–ˆˆˆˆˆè.‰V :Ê.H•“ÄGK§ÊÏ™hb¾z‹`Çû0Ö¹EîÕ³¢µeÐQvþ÷Í„ä‡ÌƒßnË–Ui8j¸Ž£g®£±¥ú‹åÐ_,GdhÎ.LÂÊ ß\ÈËd2¡¤¤Àšl"ßÀ@–ˆ¼ší‹c¸P€kNÀóÏ?Ï!"ÎD£¤»¹%ä”"%9ÙÙÙ>ÿz;«þê1ˆ•+a3g N§Å“7ÓÄÙËýgs‹Dä]u]L®š‰`v¼íožôähüâ™Å8b¸ýÅrœÎ/‡¹Å†#†ë8b¸ŽÄ˜pè&ã{Ë´li@DcY"òîƒ!³&3`âPç¢Q%vXÑm6Âh‚'øùîëì´¢³üSi¡§RâD,HPN{'ß$‡³Ùˀܿ†ÏáR5+³šyHú&„€`îoClCÖ lÈšÊZ ôùåØªUuM¨¬kÂþSتéÉQxjÙtlY•ΖˆÆ²DDDDDDÐÕPŠŽ’3n=b=è2¹(×x”ù ôUlòþ&}É:Ÿ£óë"Î|þQ)¬a«ÂÖUéØº*eõ8pê ôùehliGAY Ê.0%¢Q @#ŠŒ‡F£q»¬ËÉÉAiY%lævLHÍ‚_Ï™""ÎD£eïÞ½0µtÀÖÒ‰ o¬ç€yÑþÖQjp«Šu±ïñN«‘¾t™@Î7ìá|§ íWN`BòØšÉFéÉÑøí¶,˜›†>¿ G ×q¾è&†ˆFE,€ ‚€˜µk1qút·ÛÈú8£ÑcÅ Òi-DDœ#ˆF¼ yÏþ&vZÑqí}tÕ—*×ÅGkÿN àˆœ©UÀW ¸üpø(=f;Ê. 
»ÕŒÀ™s†YdXKK"¢±ˆ,Q/z†±‹Ö.B‚96Ô»9Ó€=›ãgm ºn¡`(;*k-8_t•uŽÀ51F…ų¦ 1ÖQ²î|™ˆh,a KDDDDDäAGÙ—0vãcR¿P¢ 6=&U͞Γ®ëºU„Ž`&$?̺ •µ¼øÆè/–÷zŸ%³ã°sÝ|,™Ç#¢1ËC@DDDDDäJ´ZÐY‘«üŸa,Ý-]¦TY-ë¬ÈEwsf Êê±hÛñ>ÃX8WXƒÇvÄÃ5Y d‰ˆˆˆˆˆzè(» \N@dK÷dÓc@Jœ¨ü¿³ê¯”A07Û°bן`n‘šò®ÌHBÎθvpƒrŸv¯Æ®õóxvßœ+¬áàјÄ@–ˆˆˆˆˆ¨‡®[EÊåìe„îÙò‡ÛQWC)ÄN.¨:PôJ›³sŽïzº…É.=b—̎îõ píàÓHKмôÆI d‰ˆˆˆˆˆœtÝ©R.ÇGK=@‰îÕœiNÿé´¡»‰m êt¾Ô¦`×úùÐ-Lîó¾‘aAøÍ— ÊPYkñúן™™‰íÛ·ã{Ò¹9ù²DDDDDDÎÚÌÊÅð‘ãAC&!Æi{êjç€ PAY©UÁ@¤'G+U²•uM^ÿúÕj5´Z-Dps ò VU¾ª®†Ñht»=€CDDÞ,pî:ÌŸ†Œ$–®ç¢ÑäƒÀ¹ë°fŽ)q“¼úµt™Nµ·ùÞÒйå´=u·šáÏýmPÒ“£|_¹—l£½Õœ6îv¨ð6¼óRRR°cÇ×yœCDã…¼Úföž÷¼æ9››mzMã™ÿ¤DÅ%C«Õr#'Μ#8GŒ‚ÊZ ^ïóì>W%âÁ9‚FÍ‹‡.`Æ3G°ÿTÌÍ6ˆ¢s³ ûO`Æ3G\ÂÔÁjliǹÂ倮”7ŒÈ6½ÿTÁˆ|8cn¶aÝÏßǹÂ4¶ðÔå1%Ààtv Èý‡„î]ϰK˜ÉA ¹ýÀÑ3×tÿs…5J«ù{ɳ`§Ë—KÙ¢…†FžÓïMÎtž1õqÙÙÙÈþþÎ]¿ð˜q;æfŽž¹Ž§–M—~‘®éçÛßgAY=mËñ‰~HÄ9‚sçˆñdûöíøæšÍœ»Îë_Ë Ïcÿ©¤%EáÚÁ ¸¸/Þ³÷eãƒÝ«ˆg÷ñÊôˆýgTÖZ°â_N*}ilïo§sV+ǔ8þ†]wK·Pê»ûØ%8UÐï¾nÏû€§–MGd+dû²À©zñÌ—LŽ Ý›Ü¿AÙŽ‚¤qHâmÇQQkaå–—ìo& °÷¸ÈP–îJ«UÚ~ÚlÙ{ÞÃŒÍo!{ÏûÊßA¯l~˜ƒ×D¸¶-ø¯“œëèîõüði>‡¤W di\ø/ýD„B·0Yùtµ¯Ó]ô˰h[BVÀ”õ‡ðâ¡ J/Æž=Þ*k-xvŸSÖBȪ˜±ù-ì>ö™Ûc¾pð<^8x•µdïy!« dÕdïyÏå1®á¥7.HÏÑpÝåg^)oÀþSxjÙt\;ø4¶qŽàA£àÀ©+€_l~¸×Ê£%³ã°ký|lѹ×EÈÛ®óv~/•´G ו}ÒÓ~Ó×þÛóg?¶ë$ ËM(,7¹õ°u|èáøþg÷Ü>À9b¸¦ì›36¿…UðÂÁóÊí»]Âì$5.¾–­Ì94vØW]ª®ÊÒ Éa>ÃØ{„‹¯e+b‰ç÷r—6Þ½šÕ±´J ç:º[Æ:×ù.ÀKï_pÈ×UÖZPPÖ œŠ¼!k^*k›pôÌuœ/¼‰¼×Ö"1V…ÄfOBAYb±xÖDÚÑ'Æ„#ïµµƒZ]”ˆ8G µóER€¹xV\Ÿ÷Ûµ~ÛuÙ{Þƒþb9Ïš‚]ëç+-CôË‘³st “õ\žÝgÀÃuåñä}ò|áMüaç ,™íxŽ/<ú+HKŠr»ïû»¿ôäh,ž5öEcÏš‚Ä•Ëþln±a‹. +3’PYÛ„#öþÓy¯e+þUuM8WXƒúÕˆö`Àynø`÷j—çEcÛƒS‚Ž bç!?ø¶­†cC}3|ès]ÃØØÉ@ÝŽÍ݈ »Wãè™ë¸â¡ÝKDh ÏŠƒna6dÍà€ B€'a_Þ>×ýìðƒošŽ l¾;þæ;•(â).æÕ'²äóöÛ{ =•5]¹N—‘l?ø+s9ð37ÛðÒ¡O‘Ž‹NVO-›Ž…ÛrÜû¥7>…¹Åæ€ì>öv»„#†k. ”5`eFŽïr„0‰±áØ}ìôùåØº*]9@;z溽ºhÓ}UHä[JÄ9‚s2¹i°•GG × ¿Xî¶o]•Ž…Ûrðì¾3X<+nÀ{®°G ×Ýo‹.3žy Ïî3àÚ¡§¥ç\kÁý,ž59;W~ÆÊŒ$,Úv»]Âñ]c×úJŬóþ%ïÏ=Cã´ä(¼xè^€ó´0PÏ? äê¢Æß,"Μ#ÈkÜMK ýÅrR«—DzïCæ›R};ò>$W»ö<`¯´WªJûo¹²9ï¿éÉÑÈÙ¹ÂcE¼ÌÜlù¤%E¹Uðn]%õ5ôÔæÄùƒocë”NEèîFûÕ÷`Íý ºJÇåö>gðüZ`r¸ãD鼫þý-§ó¸à9‚‰ÓyÀ¿¿%ÚÃXI|´´ýd>È1¢»Ü¶L&”””  €yN*€§ U7*¿»s¥`6ïo|È¡Õ*Íw;º†±QÄSb9Dýb…,ù4ýE)<‘*ÌÜ{6žÎ/Ge­EYŒ¤Ê^ñ#ŸžèlÉì8Ð_Qþ/W–7à±]'=þüÂr“ÇÇ¡!<`üâ.àÚ‡ðüóÏs@ˆsqŽ!ò6>…åÒ)¦=ÝÂdÐ_Á•ò†·-¨¬“Z ¼ôƧî·ÙÛ\)oÀ’ÙqÊé­ž»ëïç]±?ïų¦x¼=-)JiSà¼{š+¼EP€TÕ'8n(ZÑ~åü"5Hú¦ÛbYÝÍuè(1 §,)ɉÈÎÎö©m^üëÓ _H¡€T-«Ï>þ\ÄœÀŠÙqªØä‰È»*ï/Žýfå" ëC_]èëû[Oeõ°´¶ßÕ÷ö×^‡\ÅØ,ø@¡ý:“E:K@Ÿ+UÌ.⇠ã–É"…óîÚŽ%Ò^Kئ`ÀÈ’O“«Û ÊPà¡Ï öë©Ç]O¡®§OÊÕj"ìÕ#n¿ø§ð Ýf#LfÀÄ¡ ÎÄ9bĤ%EáJyƒËž÷±k˜„‡”ÚTÖ5!=9jÈž‡Ü—ÕÓ>–Ž„˜pD„p„·÷’öÖJÁWÐEQ dÿ »:”ýªýË?À/Rƒ ©Yð “ ŠVt›0šà ¾y"^H°FÌ™äœQZ#O›M@ÞU ïªtÛ¢¥É·åý È-rmM K‰ñýLJ/ û›³]Àù¢›wõ½ã¡¥ÁP  ƒÔA/аØÈÁlÎ_D,Ÿ'`уà‡Pㄱ0|îùƒ§QÄ£‚€Ó 0%ŸUYkÁéür¤%Eáâ¾l·ÏxæŽa‹\5ãéÔ`¹:¦gø²!kú€Â"âAä+t “p¥¼¡ß,^:$õ]½ùûͤ ··?äÀt0ä°õÏ{Öô{ßÄÎã¦R1ëL®¼ê­ŠJþ9½=wyÿ÷µ wçêXÿ¸¹€_:+r•ëºÍFØ>; ÿûfaBòÃãjÐÄ;Ö (6úOÁ, ºyù+`bT5›5Oà¢8>J\¾—ê0%˜ˆ“z k5¬#ï—`« à €sNÁ¬|v€>H—úl§?À>³¾F®†Í-’.÷üàI#ŠX"HdU¬GÁö1 Œ‡Fã¾(Y—““ƒÒ²JØÌí. ãÜ+NîéöË%V…•I8_®,¬³Ä~ v@ÅíôÅÓöÇ“¥%I>ÎarPYkÁc»NbeF~ñÌbnˆÄ9‚sçìÝ»¦–ØZ:ôõ^û:¶èÒ±ÿTœºâ±O2¼pð<Ì-6<µlºRY*WÖö\0ú9Ò"[•–…óE7ÝÈ€|Í€ªú&üç?|éÉÑX<{ Žž¹ŽóE5nÁé?ýê î4ÛpݾXOéÉш Ä…¢›07Û\*eÍÍ6–›<¶Bð%]7<ﻘ’†Ž² èºUä¸íVºnÁoòÔq·¿i5€ÖÌ>QpÃqPê\5«V©ñ"椬œõB—¿.—:‡°îáâ¥/±Ãã•Í+gEôæ|Q ÌÍ6=s-íøÍ—¹ýn »“ ÍC0 HârÏä9Ó¤/†³ÞËX'½Ÿ¹E"ªë=Ïg b&ÀA@ÌÚµ˜8Ý}]²¾¾30VÜ Ö2ž°¯œ®Ëè½/܆¬é8_Ž·ÏcCÖ eQ‘£g®ãÙ}üP—Kk;öŸ*P‘E†a‹. 
ôWðì>þóFdXÌÍ6¬ûùû¨¬kºçJ™+å 8_TƒÙS£½’5çÎ4ö”””øÄëˆ Â+›ƳûÎ`Å®?a×úùX™‘„ÄXÎÕà¨á:Ž®#-) ¯8-àµký|ûþs 1áJ»ûØg8WXƒÅ³¦x w{³uU:Žž¹Ž—}ŠÄ•²O1\ÃÑ3ÒÏ—O—‘ŒÝ1—pàÔ,ž§Üw÷±ÏPPÖà¶0Xa¹ ç‹jŽÄX¶®JÇîc—ðì¯ øÍ²”ýyÅ¿œ„¹Å†=¾ß׈ÒѶ¬BàÌÇ!&?Œö’ÑÝàXÉ£ûv…Òæ ««k\íoZÄ™,Rx÷ñ_EÜnr¨š,Ò"`yW¡EªFª´MçÜ8ææêj{(at^¬Æ=x˜.bùCÍdø4Üò»Až×w­_€ÇvijûÎ 24hÀ}É©r0[ Ž³2ù ˆ‘9K@j–«Âo~¼ /º€EÛŽ#24f{ ų¦ gçãƒz‰±*üöÇYøÇ}<¶ë$"í­BÌ-6$Ä„#gç 徑aAÈÙ¹Ù{ÞWî+ÿì§–¹¶‘+oÿ~§TÉ~|×ãØµ~ÌÍ6Ð_þâ!·ýyëªtŸ|ŸûÈv7×)gRÁ*¥=®;Uè(»àlí÷­¨¨ÀéÓ§±råÊqµ_¨UÒNYߤSÛ¿Š«\ÃÙž¡ …²Ú)¤b¸7’Z­RQ]W9*ýz39\Äœ½)‰¶ IDATi@æ,š†c‘ü¡ác»NâÅCȃTû×·”ø @]ûT× 8þÏó?ˆý9¯Ä{¯‚•¥ˆ"Ò©Ò/yàb K>)1&ì^= E6>ؽÚåô—Ȱ ßõ8*k-¨¬kBDh Ò“£±ûØgna‹|ß‚²z嶈РèìUBÎz;Í&-)Êãs½vðiûé6¶^_Ç@NÝ!"ÎDÃeCÖ lÈšýÅ2¥jDhPŸ•®²f@—‘ }~ªì¦sÅj_Û¾§mZ·0×fŹ<^ZR”Çðôäh\|-Û從Z.üâ™ÅX2;WÊ”ö#òõOÙ+çûÚŸŸZ6‹gŹ|oosßÑÖm6ºµ¶ñŸ”ÿyß•‚Ùë@l3K÷íî†^¯Gnn.t:-Z4îöM °é1ÂÙÜ"%ÕªëÝïë|Ê/ U–i5‚REËEs†ŽÉ"ƺ…Òû!µœ`ë=ä¯+ëšp®°†\“`Ø«f4B g+D¥‚»¾æ¹Ôx~5\ŒuòNÒï “KË~÷÷)RØžhÿ7˜!ì°a K¾¶Äªú\õ¹çÁ™¬²Ö‚gu[tiÐ-LvyŒÊZÇAž§Çèï4šÞn òøBdXP¿Õ6ƒ9­“ˆ8G ÝÂäAU E†õÛ×ÏÓ¶ß×~2Ð>½oo¯i ûó`æ˜ÁÜw48/ìÕm©íõ~þ“€é¡ýË?¸TÕšL&>|z½k׮Ŝ9sÆå>¢‰²—9V)/6Ú«“sGÏ\ï÷ôf"âÁ9‚ˆ|Uwsí€î'bcca±XÐÖÖ@ fýë_#55+W®„V«·ã¨V™J_€ œ>*W/yª m³ öÊ2÷êøh!A´ Òc«#ÆOÛƒV+PÝ`_mRËV›§ª×¾ ¹V› Ø+õ¾z;s³aì(r®œ…  @-¤êÙ*Apko8zm{š3Õ*©ÝÁÄ ÇYãõÌ}Ø„>ç<("Qiá©`ûûD#‹,Q9;WzÑ9[™‘„ßþ8‹DÄ9‚sKbs=ÄN+„€þ“>•J…Ÿüä'0 0 J0[RR‚W_}©©©X»v-4͸×`yerÇÁ°smiMßÉrøè©÷©\Y+ÿ¹ÚL5€±Wi+‡ Êe›}Lª÷é»Ïkßã51ˆr­€È÷‘÷07ÛðÒ޾æiQEÃ+Âþ•jý¬Z¹‚ÖØGh²Ø[xØïå^´šLJPñÑN—½èÃ)ùuŽ*WÀºÞ͇M€Ô‚@®~ÔŠ€áëØÀ@–¨‡%³ãpýÐÓ8WXƒ+å HŒ GZRÔ˜>ˆ8G ¿ @w »©NjO0!!!ÐétÈÊÊ‚Á`ÀéÓ§•ÛJJJð³Ÿý ‹-‚N§ƒZ­æ8;ÑÚƒB]¦£ÍAC£Ô°Á,ÂX']ç©Ý3Ge­Äy!±Þ ´òLÛÏfÐj•ÕÁ„Cir¸¨œò,ŸÁöý¾&&NœˆAÝ6Ü^8x…¦Ý÷JYƒË‚‘¾ð7Zff&RSSQû꫈ôí,R0èVBZ¬ RHk륒֙<¿õ·à8›p p=I½‡Ï [­ðx¦ƒLX>ÿõºÊ4¢ˆ`±‚€û U¿F81- d‰ú]Øüˆ8Gv{ {§jÀ¬Lf333¡×ë‘——§Ü–——‡¼¼<,Z´k×®• Ç(§ìjÜÊ‹R©Ò6˜û¯ªíË@Ò #!%Nê9)‡¯Ž¾“ Ëh4bïÞ½HMMÅøC·Û÷»ß¡µµ;vìñ}õJyÎÝÔ÷D„â•ÍûÆ VC­V×;‘(-à ÖBª¨­„´p˜YQ'° òñ«KGwþº·¹I%Šˆ€¶N²W½F‚ÁëXd…ô!ƒ¥º‘¡¡ng1%"¯8wæO CF«‰ˆsѰðÐènî½^É/<s×aÍ5Râ&y 6mÚN‡œœ(·åååáòåËX¾|9–-[Æ`väSîçLs?ØoµF{¥–ÚJ׋Êå¶v¡Ïj®‘ L ÈUlŽŠ¶‰ö6íšhçê¶ñ@ô·¿ –ƶµµ¡µµµ×ûÕÔÔà—¿ü切²žMí‹na2žZ6½ý}@¬ýßža­ÜöÂZY¥(:¶k/ (ƒÄØŸ»\å !-º¥ŒCW¯Q àmx礤¤`ÇŽ.·3%"ï>Fœ”€¨¸Hhµ<Õ‘ˆ8G ! òám·ÙØçýü'%@“t?4±½5jµ?üáQ\\ ½^ÒÒR@[[ôz=>þøc¥ÕÝ›`穞CÛžäöý©®—ß¾~v|tÿãh#à²5ñÍÀ~9ým ä06,, aaa}Þ7<< #Êþâ™Å|ÓÉ…ÜöNÿJ;‡çù£²—Ëï+Šh`³µ ºø‹"bEþ}„¢Îaª'Îk0s_Ï|Y"""""êŸ?„€ ˆ6 ÓÑj|ïUçZ­Z­ÅÅÅ8~ü8ª«¥sHÛÚÚpüøq èt:,Z´ˆïÁpY®ÇæœÃصk×âäÉ“}Þ_¥RaÙ²e8~üø¨TÊÝ­Ä^.€U¥¶‚Ðç"c›Äúùq0iH0õqÙÙÙ(­¹ƒ—Mð á€ç¢Q´}ûvä—[p©¢™ƒAÞ%4h”Ó®;U¸Ö=´V«Å¿þë¿"77z½·oß - tøðaèõzlܸZ­–ûÑêÆ¬CiLL Ö®];æCÙU­§¶ðÍ&ªd+Eµ‚XE/aì·Ðwå+Ñ`1õqÖ@5ü+C9DÄ9‚h”iµZÜh7Á¿ÑÌÁ ¯â?)ö@¶»é0„¬,33™™™0 Ðëõhkk ³¯¾ú*RSS±råʳÜ߈zw·a¬l´CÙóE5wu_UH Ò“£¹Œ3rðZe¯€µÉÁêÖTQÄ|†±4ÄÈQŸü&%¹€î¦ºaýYYYYX´h  ƒÌ–””(Áì¦M› V³74Ñݸ×0V6’¡¬¹Ù†u?ç ký½¿Óц!24y¯­Eb,û/¬¢ˆ? 
”–×}«¢(Bèq{„(b%‡‘†ão+õyÐàÔÖF´Wʧèt:ìٳ˖-s¹­¤¤;wîÄáÇa2™ú},ÑÚëg‡!vXùFÒ¸7Ta¬Leå…¾Z[[‡åy¿xèÂ]…±=™[lxñ ÜÆ‘`A€§%áDQtùApù?øÀå ÛYgù§èüºo*ù´‘ceñЗÆVÖZpàT^8xÞí>æfNç—á¨á:*k-ÜHÚf¼à’‡ßžÂXö¥¡ @#Šx .ÆívVÈú¸œœ”–UÂfnÇ„Ô,—?¤‰ˆ8G¬½{÷ÂÔÒ[K'‚¾±žB^Gˆˆ‡ØX @ª@÷JÏ+33™™™0 Ðëõhmmõx í\ Õqí}é€èþÙ|cÉçŒV+ŽJÙ‚²z¬Øõ'˜[lHŒ Ç/žY¬ÜvÄp /úæ›rÝÖUéxeóÃÜÆ)+€ÓJz\Ÿà>8Zgi# ±6bÖ®ÅÄéÓÝng…ì8øEl¬¸n³b‡•BDœ#ˆFQII L5åè¶/2DämüÂÜu[jÇÜóËÊÊž={Ü‚ŸÞpé(=ƒî¦Z¾±äsßf+ÊJÙÊZ m;®®¡A.·=»ïŒK ûO`÷±Ï¸AŒÇ¿·E—06R+‚ ‚ UÁöøžXË{œMA4¬Sqˆˆˆˆˆh ü''*—»îTÉ瘗—‡¶¶6åÿ=OOui_ÐiƒíË?0”%Ÿ1VÂXÙP…²»IõŒ¡ø`÷j\ÜçXÄlÿ©åò+›ÆÍßoÆSˤj´§®p£G¬þÛþesú .E±@ªÓ}Ã×`+E‘}ciD1%""""¢<„9jŠÄ–ú1÷üZ[[¡×ë]®ëYëV-key¦y;“É4¦ÂXÙP„²ç‹jHë’Ùq.·Î—ïKKŠÂÖUéˆ Âo·e!"4æÎÖpã.Á½*V%ŠxÀÿ+è¹7$ æÛCÙå¢È¾±4òSqˆˆˆˆˆh „`„û©Â6t7שçg0\ªc©BVþòôùµ0”%o×ÐÐà¶ý*• *• ·oß¾«çXY×X2Ë5Œ-(«WnÓ-Lr¹--)ŠÅ8Ðà(€àZû(b³ ¸TÅö´Øþ•Æ0–FY"""""0!Ò±RpwÓØ d[[[a0ÜŸ¯ (_žþ/›ë`ûò|ƒÉkiµZlܸõõõ8~ü8¬Ö±ñƒÕjÅñãÇÑÜÜŒçŸjµú®+1VåòÿóE7•Ë+3’¸Œ3—E87ÐQ‰"¾àQU±= si”0%""""¢@„9-ì5ÆúÈêt:¬\¹)))wõýbsÚ¯¾Ç7™¼Vffæ˜ e{†±æž¯²ÖâòýÅ2RoÙôäh—۪앳ä{j!±=«b°YÈ!"oø{ŠC@nÅÄHåòíVŽŒ¶§7‚8 œ#ˆ8GÕß“”ËÝÍcg1¬deeA§ÓaÇŽ.·Î]‡ )ËôM‘N¡rO]·ŠÐU_Ê7š¼ÖX e‡2Œ•Ûœ+rôƒ57Û” Y]F²Ëý÷Ÿ*p´9èÑsÖ[ßÓíÛ·ã{ÒÇùö}À꜂Øÿ` €`Nä%8Ô“ì8 ÄÔ*r@hDÔXœ¶µÐhç"ÎD£¨­Ãq +Lp=¼õw dÅæz¯x=þ“ûóžôMÇó·6¢»­bsÄNºîT6ˆc¬7.Ñ`effÞ|óM?~|ÄùêÊXÝÂ$\)oÀK‡>…‰±á8j¸îr»ì±]'•…¼žZ6Ý'ÞOµZ µZ=®ÃÆJ‰¢K HU±K¸ËÓdPÀR]ÈÐP·y,yµÀ¹ë0j2’T "òÉ9Â/<]Õ€ 0MÍ÷•†_uc·côPM*„E+al×*øOJ€_x ç®Ãš9j¤ÄMòŠ×)GÀ?8Â-¬m/=‹Îª|·>³DcéwCûÛh…²CÆÀ]:öŸ*€¹Å†ÜçÚ+:!&º…Ž Y9Œ Ä®õó¹±x9+€óúÅÂiNÖˆ"V "8D4FÕxÞy)))ngï°e¹ÿrWÅ*— ous@hD”68ªßœO‰ïÿ¤DÅ%C«Õr9G矜#„ÇÁsu#«Òid8W¥ Ý„9~È}d…€`øOJ€&iÚ0£¥óëBt}]À0–Æüï†ìo#ݾ`8ÂXˆ »W#!&ÜåúˆÐ@äì\árÝ]v­ŸkŸv[Œ¼K%¤ö—œ® ð-Æ’—c…,¹ÿí´rîÍ&©oßÄ üƒ”†—s°ç“ÂáAÄ9By½©è¬Èun4ì¾29Yÿ(÷}ÎoRºnº}äôþ®;Uè(=£´+E‘¡,ù„ÁTÊFTWW÷úX>ø T*Ï!çp…±²ôäh\?ô4ÎÖà|Q Ò’¢°xV"Ã\{Ëÿâ™Å|Ó½œR{‚Âs0«bÉ—0õqÙÙÙ(­¹ƒ—Mð Ð÷Á"â!ÚO,¼%b†ŒÒð¹Ý&{ÊA^¤†ƒÂ9‚È'çˆíÛ·#¿Ü‚KÍþ¿° HØ,°v_™º1MÍ“œhøÞaí´ÿ'HåÒ;\Ù.~gŒÕ>²ÝßDk#Ú¯¾‡n³Ñq¥„.7ò e?ýôSÔÔÔôú8µµµX½zµÛõÃÆ:[2;Î'ê"ÏJèE6§06€@*?$#Â@ÖÇi4XÕð¯ ܆«E‡=lù ¤ 4<ð£áó~q—ã/jšËé¹Ä9‚È—æ­V‹í&ø7šõ}þNÕˆ”tcë"îs4|>)wìsþÑÓ<ÞÇ/,B@ÄNDk#D«Åcp;–÷7±ÃŠŽÒ3ʾåòú"¦ ûv¹×¾‡/¿þ9þmÿçýÞï§[çáåçæ©ç~òã ¬^>•;â0h(ë©×!üò—¿Dkk«Ûõ#Æ’ï²8 )uî›"ŠÐ x„H¾†,y>ð»ïAt–]€ØiÃí6é`x…–§HÒлÝ\ªvœŠ yˆƒÂ9‚ˆsD’VB£¯L"«diØ|fìVÚA˜° ÷;‡Fö纛káì½Å+:«?G§ñ¯@§ç*X!4ðâ@V–>]HU`¯·O SÏ7iÙ1$Æ…1FC½ÐÃX —œëQ«E<*¬Š%ŸÅ@–<ÿŒ€”,t\{ðIy7Òî÷CœŠ“! 
¶‡>ëtlwñð·¯rLœ#ˆ8G8ísÁ*øß7K e¹ /,Ø¿™†|Ÿ{÷oNÕ±ñóú¬zõŸ”€N{ Ûu»Òc¯Ù±¦óëBt–çB´6öz¿¨iü}ã0鵋°4cŠ×<ߊš&$ޱØ U(Ë0–îU#=€*À¥*ö!QÄVÅ’c K½o÷ÏB§ñÄæzX;7.uâ…%<ø£!óîߺ\úB=¸’ƒÂ9‚ˆsD/&¤.CwC©R™þzn'þöî=º©3½ÿw[²äûM¾–| –Lbc‡ <@(ÎJšgBÚ¡Î$§Iº’&]…öœ¶«ÐžL&˜Y“dæ˜ ©Ç$ ÎJ&™˜!„±!L¨o,_ÌŶ@ø.ÛÒþý!k[²dc[¾Hò÷³‹­ÛÖÖ«ý¾Ö~ö³Ÿ÷Õ‡‚¹£Ð´èñ“Š!©v¬ WBž2~FzPT’´lëò퉽FOØåL‡3‚‡ƒò…9°uÝäN1U—:p§k€#W9îó¿øê :R¼%ñ“z/Çk'ú^4>oƒ² ÆÎ®ŠŠ TVV¢ÀÒá¾äê^s@µ(bÐ)&Šø® I0Ö_•ùxjþ&€Ùªj=üfáxšM@ãQÜûg¸Pârð÷7ù ¸÷Þ«ÂyãÈ ÎÁK÷¹ÚsÄ1‚8FøA‚àœøŸ_°Otö‹?á/seìs䕾A¿ø£ë Ų-w­×ì<Ážc¢G_ÓÛy– ¿v° ®AXÁùrØ(È2ç]@öÔ¹kø“ï„—¿Ÿ7ö廓ɽ^À7’-°—¸ {–ëÔܸþ~éün/‹`9€Ôü¾ûüNqG¦ìö–R‘$Ð ·1Í £ñwˆDçl”n_ëþ©|­"‡¦|Ðw°Ò5Ð"S/ƒ|aö”Ög¹P‚3ÇÞFQQ—cqŒø1B›‚à%K·koسoõqß¡©ií´ïCŽº±€ýHPDâÝ£å!€räD‰µí2,JPú˃(--ÓÏÕÛÛ‹ââbüîW?v ÆŽÄìGp€_”]˜ ëV,Bjr~SÞâöXñ‡ö Ð+ÛsÍÆ.üÉ÷?ÂíN þß¿>„Ûç·ã÷¿Ú€Ü,þêï¾ÀñÏ›ÝÖñÕÈÍŠÃïµÿsü)|¯ §Î]Ç+*yKTøý¯ìWAäfÙ—wlÔ6¾ø;Üî´àØ¡G Ö?‡¦ò-ø^A*Þ8\‹âëçmßµu·MKËÏÏÇöíÛÑÞÞŽ£Gb```Üç 0;õø€÷`Ÿ|ëÎmÇá÷ox{ú¨õ‰"jç( –á÷þ€wDqξç@Ç YšðÁŸ£VdÿðãÓCxL'ÃÚ4Ö¯£‰;o´á“z«Kà@¶  íÃSÿ1j6ÀdLl^ŽÄ1bžŒò…Ù°uÝ€Õx€ýDÈk_ âOî‘áÑLžk§‰éqºYÄ￵Je X[0© A‘‰°Y:‡û›6³3<·ûâÑ£GQYYérŸ§¬XQ]2dƒSkâ@GÖéXÄúç¤åuø§ƒ_ãÔ¹k.ug×#7K…4u$à~ sç~ÿ« ÒóÖ­X„S+!mýá‡*Ý&åzèÁ…8þÓG¥ÛÅÿ¶±ËKY¯1QJi]1Q —÷oníÂöZiiêHÿÛ:¼r r^—-û§­¿9gÊ@LL̘ÏmooGhh(ƒ±óÔQD›‡I¶,X0µ¿E}} Ôknܸáúû@‹(bgLþuÀû*—ûãââ P(feº»»ÑÝÝ-Ýn¼#ŠØ*Hb˜ÞßôlšèÁ_Pd¢ti2`ŸUý÷ß+4AxPÃÉ|ȳ[}@í ¾h´ºepk W?ÀFâA#8FL’Bû0†"¸œù¤ÞŠsW­x(C†Å*}ަ—¢Ç?o†¹sÿñw#ûÅ_]Gt¤Âãdaë\„ÃÇôh6vI\ÇýÎb¢”ÈÍRMh»R“#pø˜‚<ùp¾W†˜(%Šÿm;ô4rʆ……ù¼¹ Æ>÷F9R“"±u}R“Xæl.œ\‚±©©©øîw¿‹ˆˆÙŸˆ¯»»øÃÐÒbÏêoœ°ÖÛ÷&†ƒ±Ãâââðï|iii³¾-‹ øãÿˆXeôžM!4¢…ZíqÌd@6À•––¢¡±ó‚µºül,A‰P>øW°\üHªÖ?dŸ]ý‹&Bä€:Z@h0 Žf†Î|ÖÑcÃí>àV¯èù2Zeº‚y{Y ÇŽ#æïQTTSÏ ,=CP.ÛâÕº'B,ÕÊ·ú€c­öò¡@\ûÆ;6ô Þ¥Ïå>5¥¿Bx‚´ìi¬¹d2™‡ž;·ìÛ*nxîSfÇF­ÇÀ©'iêH|¯ ¿)o–îs”pÎxudµ º·Æ\Ws«k@Ö“˜¨‰e{?ôž|ñ3¨—Ê'<ùpž|8 Û‡ËÐôÈÏχJ¥B|¼ç × 6'™±-m8rò2ö—œÇÚœdl]Ÿ…­YüÒf«ýá,\·n´Ú¹ëxôÑG¡×ëqêÔ)`xûRáŸ5eû|àt;55>úèœmR©Dvv6ÒÒÒðÁ```m‚€ÏD0(;aI¶ 7mBh–ûxÅ€l€3 04kÿ‘çMvj IDAT<è}e!$ !ü%†®×a°ñŒtè¼8êÕÞ°²ñÉ}ÿ‘+!S?yÊwî:QqŒ ŽÈ1!Çt ŠHDèwÿ†®×a¨¡\ÊPìÁÙ[}ìs4ÞW‚3V{ˆ”ÅŽL/#ôødE‚LØ¥ ¬[`V®ä‰bØ¿)oÁñÏ›±îÁ…øMy ¶oÔº•ÈÍRáq&ÔÊ[¢š¶mÊ[擉Sç®áøçÍ8õÕuÿ¼Ç?oÆÿ|Óá6 yG§ÓMé±Ùtº¶§k[ñê;gP¸2/.EnF¿¼T/ŠÒeô©©©sŒu¦ÕjÑÔÔ$eÊÖ‹"Rý0`è<9šB¡Àºuë|b»"""°nÝ:|öÙgöß“aw˜6 ÈÒÔvœ…Ù/̆µ£Ö¶Xo_u ¼¹ âC– …,a1±#ˆ8FÌPŸ“%,†µý l·¯ÂÖÑàœ%räJÅg"(6eÚúœ­–®ŒðÉÏ,­RðÕãd^ ™‚9þìxJ‡WT¢øÃz˜;ícÈèz°©É¨¾lò˜yÛl삹Ë2­µ]«.u &Ò^cÖñžÍÆ.ä=ùßxóWu ÈΟ؈wË/¡ìl>:×0÷Xðnùe¼[~¹ñx±0V¤#&BÉ›f7–srr|jÛrrr¤€ìM?m_çé³³³¡TúÎ>œ––†ˆˆtwwÃ"¸ š]bz~¿³ ȲøL)›ÀÖÝfϰ³Z`ë¼ÉÆ™Ï{¡1B최3gˆcÇâ1ƒí*±g:g;ZoÏÍË>7ïE%2åŒõ¹ ÈDX}8 똸˞) Øì³˜9—--ÈæŽ2ìɇÓpø˜=»859Â- ë¨ûÆáZ¼²}$0cî´àO¾ÿnwZÐ|rË´e›]¸ÿɱ}£Ö¥flš:éêHT]â´®óɶ‚%ØV°æn Žœ¼Œ#å—QÓÔ¨nìÀso–?/ ϬÏÂÚœd6Ú418—‹-ò©msÞƒŸ^NÇi9*Ê÷j$«T*i¢¯›`@vº0 KÓ÷cÜ©î/ù""ŽDsË9ðÆ>G3:¾G.€£(†#øé+œ·'(2Šœ¨=îZï6$*`Oýð_+ïZ«õ÷¿*t¹ýÊöl>¦ÇoÊ[ð^rŸXñ½«püófüð@%ît `݃ aîÀ?þäk4·váÿýëCSÆ>ôàB|ñÕuüð@%¾WŠu+á¡âð1=ÒÕ‘X÷àBÀñÏ[PuÉ„—¿Ï@ú|¡ÄKOäâ¥'r±¿ä+ì/9ïò¸#k651[ ²8ù<³SIˆ¹˜$ínT*•”…|«wOd‰ˆˆˆˆhÊd±)ô±mêííu Æ r%÷þ‚Bcòà \úÖëu€`Íwö»™JiÞ’xäf©P}Ù„&ÍŠ‰R¢êøŸcÇßÂ?þäkéþèHþÏK`ÇSS¯3ºc£U—Lxãp-þçRN­X„ã‡Á+*=¾×?þÍì€X“½¯í\C'jPv®wzÐÒÖ…ý%籿ä< W¦£pE'#"ŸÁ€,M™A®„8dñ™ìØ¢¢"—mQ,Ûâr¥†bÉã)a5~ YBàeÿãßx¬¬úÍŸûxš:§Þ-„¹Ó‚ªK&¤%G"Méö¼u+A¬Îã:N½[èvߎ§tnݘ(%ŠÿmŠÿmª.uÀÜ9à±~-QnFÞz¥@[½Ù²³M(;Û„Wß9ƒ­Yxf½ŽÑœb@–ˆˆˆˆˆ¼žøHÙââb®ÛâŒuPh `[˜ !„Õð¦*&J9«ÁѼ%ñltšG½Ù–›(;ׄ²³ø²îÌ=:ׄý%çaî±øÝçP©TP©TáWJä7ú´è4Fãò8²Dä×÷?åiX‘ÎBýDÄ1‚h®ŦÍ€˜¸xlÞ¼yÖ·¡¢¢'Ožä—Aó£ÏE&BqÿÓØ˜§Bfr,Äs·kBÙ¹F”mry,:\É"¢uÀ{ðþûÈÌÌÄž={\g@–ˆüš,6ñÉ1ÐéTl "âA4G‚"GJܹmrË™iõõõ8|ø°t;77ÕÕÕüb(` òÈbS I_MRÄÉéÚV¼wò2ÊÎ6¹eÃn]Ÿ…­YX›“̆"¢9Å€,yE‡@ˆH€ØÝÀ Õét³òÞƒ?ûÙϤÛjµ;vìÀøC~1DóDgïö—|…#å—ÑÒÖåòXJb$¶dáÅÂ\ÄD03–ˆ|²nóæÍhh½cU&—Ì…é&õCìn‡­« °ZØðŠÑ@‰†ÂK…9FpŒ ްk×.œkêÄùæî}±¿bÿö9!S"(2BDùÌT! 
ŠH‚u8 k4g% ÛÛÛ‹¢¢"ôõõâââ°{÷n„……ÍZ#¢¹su8øZÝØêÆ—Ç6¬HǶ‚,®Ì`C‘Ïa@6Ài4ô+Tµ„OÿÁÞP?¬×/ÂÚ®‡Íl`cÓ˜„ˆÈbS ×,gp–cѼ#t:¾0AvÇ<ý}®¿Öuj«—²‰Æêsò…K!KÈœÖ>•ë:ö Ù‚‚‚ý£ƒ±¡¡¡xá…6ãýˆæNuc;~ZVã±$At¸/=‘‹­ë³šÄã"ò] ÈÒäø†úa5^ÀàÕóÀ³nhûLw;†ºÛ1dø²ÙÎXÍÀ,Ç"ŽÓÝçϰ1hÂ}n°¡ƒMgœ²2õ²iÉš Š©k4güs>|Øå}þú¯ÿzÖk×Ñì0w[ðÞÉË8rò²[&,¬É^„­YØV°„ED~Yš[wjŽAì¿ãöØ¢(‹ã€Ð` E€+&¾½åzŸõF¬ Î,€|a6‰cqŒàá…¡ëul(÷xòãž8 9J`Ÿ#@ß ˆÖNÑµÏ Y0ØxƒWÏCqïãÅgzõA#¥oL&L&Tª™™P¯¸¸UUUÒííÛ·ÏZÍZ"š}›|Œ/뮹Ü®ÀÖõYxé‰\fÑßa@–&wÐwéc—ûbCÇ´2ä,ày"ÔÞqªqhä pÈ‚ÁKÃvû*÷>ÎfâA#8FL¥Ï5W¸eÅÞ¬Ëc±Š'?ȳ¾Aµ7Dœ3X]úÜ@Í1û‰Í^­_ˆVC¼cÏZ5 3-//Gee¥t{Æ ÈÏÏç—K4O,MÇ‹O,Eኌy3IWEE*++Ñ`éð?"òo ÈÒ„ 6žÁPs…t;D{:¯Z­ÆîÝ»ùÒ¼%öÃf6À`B‚yœˆÈß0 Kcÿ‘ê‡åB‰t{Q”#РòÞCéA0Þ]þ‚bS ‹åE8#ˆ8Fxìsý.'@Œ¥éôLž ·zG&Úl(‡,6BÈä'ÊE‚`ÿ[`0 Ñh¼Ú¶ÞÞ^KÁØÐÐP¼ð  ãG4O<º÷˜Û¤^ÞX“½ŸØÈ†%¢@#ŠP¨Õ 1 àJKKÑÐØ‹yÁÚ—Ùoïfèê¥Y›CäÀ߬b …föào¨é ÈrŒ è1¢¨¨¦žAXz† \¶eR¯u¾Œ|Q”€§îcFM¯ËåøI¥Õ^ÇyÈ‚ÁÆ3^O¬çm@¶··EEE0íuiCCC±{÷î MæM#"""òF€m‚€ÄM›š•åö8²Î`0ÀÐü-ûe-%õcÈøµtû©ûh¡™ñÌýÁøçòAöK‹­ Åg²a8Fäá˜c²¬·¯ºÔ}ê¾ ö9šv¡Áö@ÿÁÊ‘r!bÆêIgÉ:²cû|~~þ”·éèÑ£R06mÚ4áïTûùž¥éñã>ÞÒÖ…«m]ìÙ¯c©iêÀž6(Í9dÉóß‹Ræ[l(89͘¸PàÑÌ |Ú`¿,y°ñ ²#ˆ8FŒîs×G‚±ËÕAX¬bŸ£™±X„{â¬#¥ ¼Ì’5 S~mii)*++¥ÛÛ·o÷*¸KDþëÇÏ®÷ñý%_aÉy·Át—> "š*þš'†®ÕHËiYŸŽfÖºŒ‘¡Hìn‡8ÔÏFáAÄ1‰µ£Ác{Í„Çt#ãºÍlðj]F£½½½“~]EENž<)Ý^µjƒ±DDD0ø‹žÜˆCý»Û¥Û9 xI$ͬÐ`÷Äܶ¶_a£pŒ âáø¼·¯ºd¤'G±ÏÑÌZ¬ BÈðutbÿغÛ&½¥R)-O6K¶ªª ‡–nçææbÇŽübˆˆˆ(`0 Knœ3!î‰kÔѬXºÐ)çöU6Ç"Ž>oNûÍŽœ#‡ Ö¶É×b ‘–'SËÕ`0 ¸¸Xº­V«Œ%""¢€Ã€,¹øuÞ”–Y£ŽfK²Ó|!¶>3„cLjaÎ%Táìs4;âB½{½s@v¢²½½½(**B__Ÿ}ââ°{÷n„……ñ !""¢€ÂI½ˆˆˆˆ|˜­kärñä(¶ÍutûdzÞ–,˜H†ìè`lhh(^xác‰hF¤&ú×Ôüü|hµZÜ|ýuÄðë# L³ 7b§´¬ 㥑4;œk"Š=ílŽD#ˆæPh°8Òç'?‘žR©Dh¨=Ͷ¯¯&“iÜçÿìg?ƒÑh”nïÞ½†_MÈšìdiÙÜm¹ëóS“"ýêó©T*èt:¤ˆæ×Mäú\pÅhôxµ3dÉó¥ qLJ Y;ðs ì Y&ü:ÅýOcyZV¤3mŒcqŒàA4—‚"¡¸ÿilÌS!39¥¥¥hhh`/[ R©<¾®¸¸Ø%‹vûöísŒ#5zûE<I¾Ýßæ»èp…´\v®Û –¸=ÇÜmÁ—u׸óѬ¸ à=xÿ}dffbÏž=®ã8›ˆˆü™,6ñÉÐétl "âA4‡yd±)Ф/†F£qéwõõõ_S^^ŽÊÊJéö† ŸŸ?÷Ÿ%,ÝÎÌÌÄŽ;|¦¿ !Q" v·£Ï" üP˜ÏSuÅi‹VCa9éôÚÎÕ¨iê@MSÌ=ì/9ïñy?Þ¹šEDsŽ% æÁaMúbÈbS ÈyZŸˆ8FÍ%N‡øä ÈbSØðÂÂÂ'Ýv”-0 (**’îW«Õxá…|®¿É5Ë¥åò "KW*.U:í_‹–²Q¦YL„¿Ýÿ$¶®ÏòøxJb$~»ÿI®Ì`cÑœc†,ÍN'Õˆ5Ðh4(..F__ 44/¼ðÂÂ|o–HùÂl ÎKY²EGEìÞ$@à Jh’ mÀÑßL'D«!_˜Í†™1J¼õJömYŽÓu­¸ÚÖ…èp%Öd/BnFˆˆ|çw›€ˆˆˆˆˆf‚F£‘²MMM¨¨¨€Ñh`ÆîÞ½*•Êg·_¹l úÏ–N)(û×ß ãJ4Aõàg¿ÑgÆÊ•PÞ· 3ÃR“¢°-‰%!ˆÈw1 KDDDDD3B­VKË555°XFf>ß±c‡ÏÏ/ÈC Ì} J YÐgðúQ{=Ù «øýÒØzûíY±•ß2cåJ(–maíXš´ŠŠ TVV¢ÀÒáDäßXC–ˆˆˆˆˆf„N§“–ƒ±Û·oG^^ž0E$Bùà_Aˆ¹Ü¹¬Øû¶=û‘h´ª+ÀÞwÁX;!"Še[Áš4y&“ z½W˜ÙD²Dä×,Jp¦^ŽKŸc÷îÝl"âA4GlÝmÔ—£´QÌŒTlÞ¼˜˜ˆ¶¶‘±V­Z…üü|¿úlBH”˶`à›aë¸0u¯´j`C>XÆ€Pu(ÿÐGV,Èd#X»~Z'P«¿ªG÷×uצm}k²áÓ¹ÓÑœa@–ˆüûàÏl€É ˜ØDÄ1‚hN‰ƒý°™ 0˜`û…xUUU¸yó&ÁœR«Õرc‡_~>AåÒ§`íhÀ@}9`é`¾½~P'ˆxø«îã¾0Ÿôö•ߨ±¦ÎQ*£ Ð@Ÿ9+ýˆˆü²DDDDD4í Š‹‹¥`,( ¿ÿ\²øL„Äh0tõ°¿†8d/Å`lPü[{9ƒ¼ÅÀªû ¯NÜý» ¨¼T\™°ËA+!S?yÊw¦5+v>[šïÓë#"-€F¡P«=ÖÌg@6À•––¢¡±ó‚µ¬YDD#ˆæPQQL=ƒ°ô A¹l „ÖÐÐŠŠŠÐ××çrÿõë×¢¿ òg¬†<å;nYS'P~ÁþO "?[@Þb@Åyœüž©Ó^– ¢N„±Ý„uªË@ìŒùñ³kØDäW’l$nڄЬ,·Ç pƒ†æoØ/k!"âA4wôz=æ…ÖÖV BCCa³Ù`±XÐ×׃Áà1SÄû›s`Özã"[ÎK¥ {ÖìÑßGoÎê43gýí·Òp&lÕç’®±PF!8u9d îc –ˆˆ&„Y"""""šVŽ`,ìÞ½eee¨®®€Y ÈÎ&A¹úÈÕÀzû*¬×ë`ëh²f{pÖØnÏœ UÚƒ³Z}R0h}‡ÞhÂê @½Á½ÁÈw®DP|&d ³!‹MaÃѤ0 KDDDDDÓBE—š±Û·o‡F£F£q È2Yl d±)‡úam¿k»¢Ùàœí³¨ºbϺtЪ]  Õêx Œ‰–3®·0v _¯Úƒ±®Ükà 1È´/ÌfÑ”1 KDDDDD^³^¯u ÆnÚ´ ùùù­V+Ý?_JwòÈfK;kG¬m °Þ¾êRÖ@j£k@ÐQâ@«±gв­÷Löà«¡Ížý:RvÊ({=1²øL6"M d‰ˆˆˆˆÈ+C×ë`½qQº…‚‚é¶s‰£Ñ8/ÛH?гu·ÁÖÕÛí«chK8¨D„)èRìZU4³iGsd½šîذõW^‹§àëÁØálPl ‚"9á)Íd‰ˆˆˆˆhÊlÝm¼ô±t[E$%%¹<',, jµZ ÆÖ××C§ÓÍÛ6 Šô gÏŠý°uß„õV lfÄîv¯sG_Zª¡I ŠcϪ†ƒ¶˜Ykê´ÿëíŒí@‡Y„©S€¡}¬š¯cg  ŠÑ@—Š  'å""¢YÁ€,M‰­» – %.÷9—-p¦Ñh¤€¬^¯Ÿ×ÙÑ„(ÈB¢\.‰·Þ¾*eЊý³hú,ÂpvìÀ£spÖ¸•îvÎlrX‘ÌV`$Ð:ú9ZpÔÿcPFÙÛz8–“q‘¿ÈÏχV«ÅÍ×_G ›ƒ( 0 KDDDDD“&õc æà˜¬J XÇ|¾V«Eee%€ÀŸØk:8& vºÏzû*`µÀÖy¶î6ˆƒýïL¬„k@SðjÛ¥îÆs©€É˜Úk…h5„à{&rT S2øJ~M¥RA¥RùÛDþ£@€N£1áá.å›d‰ÈÏ)îËÓ"°"3]Ç¢Ù"õÃr¡bÿöÙ烗>ˆ"6æ©™ëöÖ‘õž#¨8zr)±¿¶>3Äžvˆ}öÀíðýãeÖNÕă¬ÂÌ4Äp¦«£ME(„ðE&Λ’A‘‰PÜÿô˜ýˆˆæÖMïÀûï#33{öìqyœY"òû“øäèt*6qŒ 
š%ƒú“.uNƒï}²{ÀU“¾š¤0·×8dM&L&T*öÍéà(y€á€m°‡ç8‚¶ŽeÑiÙq?¬–1ë×ÎèöG$2% (4F ¶ NËÎ÷ ÈC ‹M³¿‘oc@–ˆˆˆˆˆ&là›a½Q'Ý^ò¸[ÆæX233ÑÐÐÀ^¶€ÙÙ#m§£\Â]ßs¸l¹b@6ÀmÞ¼ ­·q¬Ê„ Hþ""ŽDsi×®]8×Ô‰óÍÝ÷ÙªÛÑÙ;€”„H¤&1‹-P ]¯s ÆÊÔË _˜=á×ët:—€l^^û›b•ˆˆÈ; È8Fƒ~… ²–ðyßï–_ÂóožDL¸ß¼½ 1JŸÞÞ–›¨iê@áÊ —û”_Æ¡²jT7vR#±µ û¶<Èž8FpŒàáãt:¾0AvÇŸÇÜmÁ_ŸÇ‘òË0÷X¤ûc•xñ‰¥^íw§k[ñ§ûŽcß–åÒzÝ{ _Ö]Cï‰güs}Y×êÖ¿¦ËéÚV*«FÙÙ&©Ÿ®ÌÀÞ§—ûôØ3t½ƒ—>–nËdC¡}xÒwêëëQXXÈþFDDDóN›€æ‹Ÿ–Õ :\seç}z[«Û±äÙwQÓÔárÿÁÕxîÍrˆ"°oËr¼¶s5R#±¿ä<6ø˜_2ÇŽ4«ûáªWJqðD5rÒUØ·e9~þòzìÛ²Qá ì/9çß,÷ËÏöÜ›å8x¢zFÖí4Ÿ®m•úéšìd Õ¨nÙêÆv|t®_ÖµÂÜm‘._4w»¤9;R~_ֵ޹>Ç{¶ÜìÄGçñѹF´Üìty^ËÍNÔg½]më’Þót­}½Û ²ÜÖí¸ï˺kü²‰8FpŒ ÷nù%T7v`ëú¬1O^¼õrH—åöe]«Ô‡FïçSQÝØ.õ±»­Ï¹ÿŽ~î—u­èì@gï€ÇÇG÷gO}¾åf§Ô×ïeî¶ åf'ZÚºP¸"í4Á†éÒk}­§}$+WBqïŸM)ëà€ÕëõìPDDwQQQ¢¢"PÃæ Ì¥yὓõHIŒ”2ÕÕàЉ¼õJÇÀÉÓÿú‰ÜìµðönYŽWß9ƒßîks’¥Çö—|…C'j\jç¥&Fâ×{sÉD{õ3€­YxþÍ“.ïùÒ¹xmçjÀ‘“—±¿äüðïe¼[~YzÏÞ/z>ðkëâ—LÄ1‚cÍGõÅ'–ŽùœÔ¤(\z{›Û_ÕíøÛ_üÁ¥ÞÏ'c2ëóÔG?÷ѽ#eÝëZöìl#žó¤[½Üÿ»ó».iG?ýùËë¥þ¼4=gßÜ [ ²°4=~äÀÏÃÚ¡áì¾ÂáìšùÄr¡gêå¸ôY0vïÞÍžvŒØ°"ks’9FpŒð k’úËxFc[nvâ±}¿?y= Wd ¥­SÚÏEQÄŸ]3áí0w[ðôO`î±àµ«±u}îôXÆ\ßcÕxmçj®H—úÒÁÕHIŒÄKOäâ·ûŸ”úók;W#51R+6øKÓãñó-ëQ¸2egñ£wÎHŽžìoßù^,\ŠÔ¤(D‡+Æý,~m?Ѳ&;Ùg¾gQ!ÂÈCfÁ¸ÁX[wõå(mT 3#›7oöø<µZ--3 K45íoDDä›X²€Þ»å—ÝŽ Ê‘“—ÝËÎ6aMö"üøÙ5HMŠBjRÞz¥k²¹<•œGJb$J÷>.”®Ì@éÞÇaMâ8`ÜV°©IQ(\™}ÃA ÇÁmjR”\IMŠÄÚœäq³ež³-m]Òß¼û1j6ÀÔÚÄK)àÇÇåÏ#8FøŠ»=9x¢Úe?‰P"7#G÷=Ž¥éñ8TV3©Köœ¼Œ–¶.ìÛò ^z"1J—>y¨¬FÊ/;Û(•Yxé‰\¤&E!&B‰·^)@t¸BÊš]›“Œèp¢ÃX›“,õ›Wß9ƒèpJ÷>&^ WfàÓýOJöÌz~üì¼ôDî¸u©ž¨ÆéÚVéÄ‹¯pÆk _˜=îóÅÁ~ØÌš¿7ЪÓé¤e£ÑˆÞÞ^v(¢Išh#""ßÄ€,4{Æ&lX‘îˆØºÞ^OñШ`HÙpPÔÓd£³^N×¶ÂÜcAáŠt·`ˆã`îÌE÷zkGe¾D‡OýÒÄçß,ǻ嗱u}Ö¤2ŠˆÈwÇçlWŽäËÆš\n<5Ãõ='·×:vô³‰(;ÛèÒg=õIÇÉ Ç{oõPgùzɳ8ºïñqÇŠ–¶.¬ÉNv;±‘š…5Ù‹ÐÒÖåL=.x²¿ä+¼úÎ,M—êîú Q¥eY|æ´®›Y²DDDÈBhD÷$'{œÀ”% \ii)[`1 X[àuÍ/ã8¨«mêÀŸî;îòXL¸-m]8]Û*e£8fUö$q<÷PY •y.­^ÝØávßtd¨9×ÀÛº>ËcK"Žþ9FÜípŽþ­¨¨¦žAXz† \¶Åo?Gt¸bJÙ/ë®!e¸ÀX}èNeÂëëìµoâ¿|gÌçÔ4u pe†”;º¯N„£snFü˜ÛþeÝ5´´uMêï¼ã¤ÉšìE(Ýû¸ÏÕufíh€\ýÀ´­O«ÕÂh4°Oìåœ5ËþFDDDþ. 
À6A@â¦MÍrO`@6À öËXûe-ó͡ՈW %1Ò%ËR#an²àHùå ]8úÑqÔpœ--7;ñô¿~‚êÆ—‰F(°õööâäÉ“X¿~=ÂÂÂ8FpŒàá§¥tšìd|t®Éå„…'ϽQŽ;½¼öÿ­–ê¨vN!;–¨0{Ù„ß— ð$uTøNeÚŸŽàôDË8øËIç’C×j¦5 ëœ)R__ÂÂBö7"""š7¥€uº¶-m]ãè,Üò6Žœ¼Œ}[–»Ôeliëì¶¾±x:ý²®U:Pœ.æn þtßq´´uáç/¯·–ù—ÉdÂ矎ݻw{¼ä8FpŒ ÙR¸2k¡²ê1²-7;qääe¤$FJY£ŽLRs·{PÔQZ`¬ Úñ¤:½‡óû_mï’NެÍI3‹õGo‰ÎÞü|Œ’޾º¶û<$ZÖ6Û³Ý'’ánL.橊/»Û!öwB™žZÔ£ëÈÍ'¬!Këˆc¢ž•cÏ*î¨9瘸§peR#q ä¼4 ˆã î½“õ.¯]›“,ÍÄ>ºfÜéÚV<º÷8¨™ÖÏôØßÛ-¥{c ež1™L€¾¾>±ÞÇŽ4§¶,ÁÒôx”mr«µ Œd€&¦sô!8TVíö|©O®È˜ðv8²Ï÷—œw{ìùÿ<‰G÷—²×× ×pw¼ó{¿w²Þ­„ˆ£ÄD(±&{¾¬»†êÆv·þ\ÝØ +Ò'´ÍÏÿg9ª;ðó—×ûE0ÖùêkGô­W¥R!44TúÛæø;GDDD40C–’¹Û‚Î5!%1rÜ 5^z"‡Êjp¤ü²tYïw®ÆæŸàÞgßÅÖ‚,Üé± ìlDˆn¯w½ÅLYŽDD3/8‚\ qȱÿlÝmŠHôzµaaaˆ‹‹Ã­[·ØëÈêt:¶7Ñ(*• *• !l "¿Ñ  @§Ñˆ˜ðp·cud‰F)Ýû8•UãÝò˸ÚÖ`¤ÞãTjÛÑÌRÜÿ4–§E`EúÔëÙ™L&=zÔ¯>7ƒ²#höÆ"‚â3a½QÀ>¹—§²A‘‰PÜÿ46æ©™;¡õêt:TVVŒF#²Dí“SèoDD4{nxÞ™™™Ø³gËã È¡ôßoó,6ñÉ1ÐéTS^GGG‡ß|ÞHõÈåµ ÊrŒ Ù#ˆ%Ždmß²‚<²ØhÒB“6¡õj4) [__‚‚66ÑL¥¿‘ï`@–ˆÈIH¬ —GúìöE,P¸ÜfP–ˆˆfƒ,>sFÊ8Oìe4ÙÐDDD4/0 à6oÞŒ†ÖÛ8VeBPd"„è.‚‚· §¯ó&(Ë1‚hvíÚµ çš:q¾¹›Aþ÷7re &Ëùï”ÉdBoo/ÂÂÂØßˆˆˆ(°W± ›F£&}1d±)ä,N¨AYY°RPÖ`0pŒ ò!:ñÉŦ°1ÈïÈ3¥e[Ç·Ó¶ÞÌÌ‘õêõzö7""" x Ȉ©e‰ˆˆ&ÂQ¶€T¶`:8gÉòoÍ È±‚²½½½l""òþà!~$›uèZÍ´¬S§ÓIËõõõld""" üßTl"¢À'‡jÉHý½¾¾>”——³aˆˆÈk3Q¶À9C¶¡¡LD4JEEŠŠŠp@ ›ƒ( 0 KD`nÛ‡¶šé¶Z­FAA†ˆˆ¼&‹Ï”Qìe ¬ÞPU*âââ¤Û,[@DäÊd2A¯×ã*3›ƒ( 0 KD~Ír¡g޽¢¢"6ìÁXã:¥Ûjµ»wïž¶«‰8F‘,a±´lm ÈÚºÛ`¹P‚Ò_Diié¤ÖÉ:²D“ãM#"¢¹'g‘_ÿ5`2&6ƒ±D#ˆfçbÑRXÀ%CVì‡Íl€Á „O.ïC£Ñ ºº ×둟ŸÏ†&‡7ýˆˆæGn"¢À`,ÍÚDD¢T¶C–i)[ Õj¥efÈ‘¿  EÜ“œìr%3d\ii)[`1 X[`ÿMDÓ¦ïÖlƒ¶Y{¿ð$…Û}Þc9FÍ®¢¢"˜zaé‚rÙ6ù-YÂâ‘,Ù¶{mY/èt:iÙh4²¿‘_K°M¸iB³²Üg@6À ší3àŠƒýl¢ití«N˜.÷Íê{†ÄÊY/Ýö63–cÑìÒëõl c•-ð†Z­–‚±õõõ.AZö7""" $,Y@D4E}·†fý=ûo[¥e–) "¢9;ˆ˜²Ηó1˜JDDDŒ²DDÓ@­V#44tÆÖßÐàz Ë`,Í5·² ³½ZŸV«Eee%Ö‘%""¢ÀÆ€,Ñ4Ø´i“×—VŽçù矗–Œ%""Ÿ8U¶ÀÛ€,3d‰ˆˆh¾`É""?Ã`,ùÄĨ²¶;­^­O£ÑHW›ôõõÁd2±‘‰ˆˆ(0G± ˆˆüƒ±DD4×äNY±VSã´üms`Ù""" T Èù!c‰ˆÈÈ-•–E/3d¸”ÿ©¯¯gÈÏÏÇ®]»ð €\6Q`ü†bùc‰ˆÈW!Q" v·OËúœëÈ2C–ˆÈN¥RA¥R!„MAä7ú´è4îò`@–ˆüœâþ§±<-+Ò£úsªÕjFc‰8Fùù¢¥Ô—„èdlzêId&ÇNi]Z­VZnhh`ã!(2ŠûŸÆÆ<Õ”ûÍœ›Þ€÷ßGff&öìÙãúû‰MD£ÉbS0tÇhÿ!Ü!b±ŠmB3¯µS”–…ˆ„Ií¯ñÉ1Ðé{Gݽ{7 4Íœc9FÇ"régñ™R@V¼ÓŠ„…ÉÐh¦Öç‡[·n€ô·ˆ\ òÈbS I_MOÔùÖ%"ŸÐ7(:Ù)Ù Pu:3c‰cÄ<#„à§v¸3Ьhít:`ˆL.[àpåR­Wïí\GV¯×óË ""¢€Ã Ù·yóf4´ÞƱ*Ó]LK?ª¡Òò­>¶!Í8F͇1b×®]8×Ô‰óÍÝ~MPD"lWÆ;6ä,q‡ ×;à”•.¿{ÕBç² —j?]7å÷Öh4¨¬¬à]Ù©ô7"""¢ÙÀ€l€Óh4èW¨ k Ÿø_ÌÈeaßšlxàG3¯æ†MZ–Ŧ°A8Fä¡Óéðí€ ²;æ ¿F‘–¯°ÏÑ,©uêsAQIw}¾sÙ‚o/ס··wÊWu¨ÕjiÙ› Ù©ô7"""¢ÙÀ’ä¾SD$BÛ/½ÕçZ·h6üd‰Z6Ç"ŽŽÏë€þöÖ¨ò D3 oPĵ.§q?æî5\G—-¨ªªšòû;—,0™Lèííå—BDDDu\Í& ;F|¦´|ªÑÆ¡uÅdCÿÐð e‚"Ù(#ˆ8F 誽Á€,Í,çq]ˆVO¨d`/[ààM@23GþÎxS¶€ˆˆˆÈ'©Ùä‰la¶´|ÞhcHšQÿUeÙ÷³A8FqŒÅ9ÐõáE+³diÆô Šø¢Éæqß»ëß§“uÕÕÕ^e¶j4#Y¹œØ‹(09Ÿx±—äñ-Γ:Is¡¢¢EEE8 †»Q@`@–<ÿ ŽM=R¿ëØÅ!6 ͈/šF‚y‚\‰àŒÕlŽD#F‘«”Q€þ!f¦ÓÌ9Õèš‘.w:w7BH”ËßÇÄ\Sáü¨¯¯çC€œû¹sðÓÜêƒ4ÆÅÅM¹&öt1™LÐëõ¸ €U±‰²4&çƒÞÚ®ÙDÓ¡µSÄ'õN™oê&|Y¤ƒåB Î{EEElPŽÄ1" Çç>÷iƒ¥ hÚ}e°áÓ†‘±\¡+˜ÔëmÝmÀàHVlEEÅ”·Å¹Ž¬Ñhä—Cä¡¿Y.” ô—QZZê—Ÿ!>>^Zþ¤Þ·®þø¯ª‘dƒ¹ÎŽ%¢ÀÄ€,I›™z™tûØE+þhÚô ŠøIÅtæYˆH€<å;“ÿ1j6ÀÔÚÄË9FLj€#ä ³]²ß«â¤z4mZ;E|xqäHPüb—!öCì½%Ý60™LSÚ•J…ÐÐPûxÐ×Ç:²Dú›Íl€¡ù[¿í«V­B\\{6ê/þèAÙc­¸bÙŽÂÂBîpD4í¥q)´»üýâCøÊÀ,8òþ ïŸÊ-r%”Kÿ|Ò™oÄ1‚8FÌ7Êܧ¤ ¾ú‡€ŸT ùdÝ=ò/µ7ÜO€(î}|ÊëÅ‘@†7“{iµZi™Y¢À†^xAº}Ådÿûÿiƒ WLâ¬g¯˜ìW{½öÅ ËU_6l`†,MI(âžäd㈜MØJKKÑÐØ‹yÁÚ‚)ÍL­Ì} – %»ÛÿUmEk§ˆ?Õ!4X`#Ó¤|e°áË֑út˶@‰bãpŒ ø1¢¨¨¦žAXz† \¶eÒ¯ä!PÜûg¸PqÈ‚þ!à`¥ïJçyvš¼/šl8æ”;'@ad쯨¨@AAÁ”Ö£ÑhP]] `jYoûÍÁ«œ±¶î6XÛô°Þ¾ X-Ò¥“4Ï)£ „D!(4²ÄLÈâ3Ù&#8FLjìs íÀöaXo_…µ][WÄþN)[Øç„(E&B– …,6eFßN&“!77Wš”Ë›²•••ì²S ŒˆˆˆÈ—0 K^ ŠHDPD"‚ÙDÄ1‚hÎÉbSf<ØF4yyyR@vªe œ'öbY""" ˜ãd6M·¼¼<„†Úg•30 “^‡óÄ^&“ &“‰ KDóN~~>víÚ…gä²9(Àí9u ¿¬­E§%°Ëp1 KDDDDDÓ.,, yyyÒ튊Š)­'3s¤¤ÉT‚ºDDþN¥RA§Ó!@4›ƒœ±« ÿ\Y‰¥‡ã¹Ï>ÃgÍÍ~ù9ú\peŒ“Ò,Y@D~MqÿÓXžéœˆˆ8FÍ¥ ÈD(îóTÈLŽ`Ï’uÔ€­®®ÆæÍ›'½^N‡††ö€¬s—ˆým¤¿šÏš›ñYs3Ô‘‘x$- ?ÈΆ:2Ò/¶ý&€÷àý÷‘™™‰={ö¸Žãüz‰ÈŸÉbSŸœÁY—‰ˆcÑä!Ŧ@“¾X*5à\¶Àd2y]¶ ¾¾ž M4F#" 
TÆ®.ü²¶«KJðtY>Ðëý¾¤²DDDDD4c¼-[àlrdÊQ`úß«Vá/´ZD)?{ý:öœ:…Õ%%Øsê¾éèðËÏÉ’nóæÍhh½cU&E&²AˆˆcÑÚµkÎ5uâ|s7ƒæ o˨T*ÄÅÅáÖ­[ìe &’ÈþFDDäîÇ¿¯[À^²àƒúz|ÖÒâö¼Î| ×ã½êÈHü ;ÛÈU*ýâs2 à4 ú*ÈZÂÙDÄ1‚hŽét:|;`‚쎙Aó†£lA__ŸT¶`²—Xk4šIdÙ߈ˆˆüÛ#iix$-  >knÆ/ëêðÉäö<ÇD`ÿ\Y‰GÒÒðZ-IKóéÏÆ’DDDDD4£¼-[à\Z¯×³A‰ˆˆæ‘(¥¡Óáã?ÿsœÙ²÷ªTc>÷³æf<÷ÙgX]R‚®¬„±«Ë'?²DDDDD4£œ²ÕÕÕ“~½Z­––§21ud¤K}ÙGRS=Ö›õõ‰ÀX²€ˆˆˆˆˆf”·e œ3dF#z{{Ɔ%¢y¡¢¢•••è°tøÙý 'o=úè¸õfÏ^¿Ž³×¯#J¡À#iiøAv6îŸÓíf@–ˆˆˆˆˆfœóä^åå娱cǤ^¯V«a4سdƒ´Dôz= ƒtâf¶¨T*ÄÇÇC«ÕB­VûÜ “É$•kIánBä‘s½YÇd_£ëÍúÒD` È‘_³\(Á™z9.}ŒÝ»w³Aˆˆcѱu·aP_ŽÒF23R±yóf—Ç ¤€lUUդׯÕj¥€¬^¯g@–ØßÆéoþÆd2¡¸¸Ø'jD«T*lß¾c ‘ŸŠR*ñƒœü 'ßttཟ57ÃØÝíò<ç‰À>~ê©YϘe@–ˆüûǨٓ0±)ˆˆcÑœûa3`0!ÁîSUh4ÄÅÅáÖ­[èëëCUU•KmÙ»q.qÀ:²Äþ6~ó'UUU(..F__ŸOlÉdÂë¯¿Ž‚‚lÚ´‰;‘»7>ÿ;>ÿ;?_ Ξ½~Ýcæìlc@–ˆˆˆˆˆfE^^Nž< ^d}!‹Žˆ¼çÈŒuÆ®M  BrÔìmÇ­^ÀÔ+â‹&ú‡ì÷•——C¥R¡  €_Qpg¿éèÀ øem팾_(B¡V{¬›Ï€l€+--ECc ,æk ‘ÈF!"ŽDs¤¨¨¦žAXz† \¶… BóN~~¾K@v2441˜cr0•JÅþFäÇ~úÓŸJÁØEQžÉ“!9J˜ý JÖeá½*+ênŠ€²²2äåå;Ö‘ï3vuá—uuöÒ]]³òžI¶ 7mBhV–ÛãAüZ›Á`€¡ù[ØÌˆƒýl"âA4‡ôz=L­M°™y¹5ÍO޲¤²“¡V«¥åúúzö7"?VQQ!Õ…‘;—Ëç&ë$4XÀÎår,ÞŽ¾¾>”––òË"òCŽÉ½ÿïÿÆê’ü²¶Öc0våÂ…PGFÎúö1C–ˆˆˆˆˆf7e t:°Ž,‘¿sîÃ¥!.Ôw¶í©û‚p°Ò À^VˆüÇgÍÍø¬¹ŒSÞ(J¡À_hµøANΜcd‰ˆˆˆˆhyS¶@«ÕJË Èù7ç>œ/øÔ¶-V°dY¼Dä»&Z’à^• ?ÈÎÆ_èts¾Í ÈѬq”-¸uë–T¶`¢Y²Î“b82e‰È?9÷a{Ô·Ü|{˾\__pˆÈÝž/¾7¥Pà‘´4ü ;÷ÆÇûÌv³†,Í*çìd²dä´ÀÝëÈQ`9{íŒÝÝÒí±‚±êˆüûC´5R IDATáÌ–-ø÷uë|* 0C–ˆˆˆˆˆf™7e t:*++íaF#³Öˆˆˆœ±« üÿìÝTÓwž?úgø‘ &ˆ àˆŠ-‰¿¥¦?,3SSéévzˆÛκ"ÚÎtzçÂÜÙ3ý3ûéÙQ{ÎtîÝN»;söH»Ýq¦ŠÚîµcضë]h-Ø©±Rh5¡U41T05(„Éý#$$$@ø™_ÏÇ9cò $/òù<óÊëm0à-ƒaÒnXØ¡P`‡B-ÙÙ}ŸÈÑ‚?¶ ¹¹………!_×ÈrŽ,ŃÂÂB( ܨ©A:ËAqij8שÎÎI·Ë‹ñ܆ Ø¡P M$ŠŠûÆ@–ˆˆˆˆˆÜ£>ŠãÇpwɆÈæææzO&YA™ˆ(VH¥RH¥R¤°< t½¥×£oppÒm[±;”J<¶reÄÝÝúL&¤/^ì7` KDQNxï.Ü¿RŒóÒX "â1‚(Œ$YÞ» OH‘Ÿ³tÊí ¼lkk+îܹƒÔÔÔ)¯ç;¢Àb±„|=¢xÞ߈ˆ¢EUc#ÎvuMxy®XŒJ%v(È•H"ö~ÜpÞ|ùùù¨ªªò»œ,EµÄ¥Ë‘‘“¥RÊbDa$HJAâÒåçÉ _6u@*•J‘›› “É`z]²ùùùÞÚ ƒß"aDÜ߈ˆbÏ™Ì=6FfÇ'ðWJDDDDDáàÀNgq/ßýqŽ,QlJ ñÜúõ8SZŠ£MÌ„±;dc^II :®ß‰ $H²X"â1‚(Œ***ðÑ•>|Üig1ˆ0»± ½^FÃýˆˆ(F¬•JñÜúõ1ÀŽÇ@6ÆÉår ¥H¼º˜Å "#ˆÂL©TâËA {­,f>¶À·CÖs]îoDDDÑï€Zѳaç Y""""" ›ÂÂBo—l¨¬T*Å¢E‹Ðßßþþ~Æ€Õ‹‰ˆˆ(úLÆ~~ó&ÎvuálWúŽ€ëlÉÎÆ™,jÂ\²DDDDD63[ P(ÐÚÚ d‰ˆˆbÔ[^Öé`²Ù&Þ¨« o Ü‹=¯RaKvvDß/.êEDDDDDaã[àêâ^¾¬aôEQ,jnnFuu5Þð)ËAq¢Ïá@Ù©S¨jlœ<ŒçlWvÕ×ãõ¶¶ˆ¾ d‰ˆˆˆˆ(¬|Ç„È* ïi£ÑÈ"Q̲X,0 ¸€S±)^쪯ǩÎÎ /_+•"W,žðò[ZðbssÄÞ?Ž, ¢¨æ8_‡3ú$\<•ŒÊÊJ„ˆxŒ §½C†Ó8vYˆüU+PRRòuÇ-°X,J¥“^Gé³òòD {q#ZXÛ·oGqq1Š‹‹‘á§Èõbs3>·XüÎÛ¡P`‡BtAŸÃ³]]x½­ g»º¼ç¿ÞÞŽÇV®ŒÈñì%¢è~2j5Ârý ?ªHD¥R‰}ûö¡±±{÷îÅÖ­[½—566z«©©a×l3Ùlès8f~}»=¢ïY"""""г[ÀY"¢è¢ÑhP]]“'O¢¢¢Â;ÚÀf³áÈ‘#ì–C[d2ïéS3þ>¾×];Å"¡áÀ@–ˆˆˆˆˆ"ÆtÇ( ï鎎ˆ( eggc÷îݨ¨¨ÀîÝ»Y8æ»×ËçÏϨKö¬ÙŒ·|÷œÍâ`ó…,EŒñc ¦ê’MMMÅ]wÝåý?»d‰(Ö¢¢¢{lŠÁûg6›QWW‡íÛ·cÏž=8räˆ÷2q„/ÌDsÏ7<5ÙlØU_“ÍòõOuv¢ìÔ)ïÿsÅblÉÎŽ¸û™Ä_5E’ÂÂB´¶¶ZZZ Ñh&Ý^.—ã믿 ¿1DDÑN*•B*•"%†î“ÍfCSSÑØØèw™X,†F£Aii)²#0H£ù•+‘à¹õëñz{;às‹Oüçb‡R‰ÇV¬˜0\=ÕÙ‰· †€1Ôê°ÜÝúL&¤/^ðÜ„,E5á½»pÿJ1ÌKc1ˆˆÇ¢0JdAxï.9 ×hN°…#·®!ågÊÎÇù:œÑ'áâ©dTVV² qFz:0Øv"ø…à w4Àeë†pí,4†®|ˆá+N¸Ï ~rÉùÛÄ7BæœÓÞ!Ãi»,Dþª“§Ó1~l^¯Ÿ°ÓG©TzY£ÑÉZ²Äýh!Íu [VVÆ@v”J%<èiP__ :{½­ /޾!;]}ƒƒ¨jj‚ÉnÇóúøa K4Kƒßñ c].÷_ï ÀÏßhs) 6×OF­FX¬?”H‘È54àîÒó=/È1bä«vŒdæ#13ŸEã1‚fóû¶Ýð c].—ßåž}n¨£Á=6D²ŒE›ãcžÓj„Ñ ¤$ÏíÚÁ=ô---² …Â{Ú`0@µöQþbˆûŵZ µZÊÊJ466¢®®ÎÛeí k³³³±k×.ìÞ½;ânÿ[z}@›+c‡Réž+ú]Ö78ˆ³f3NuvÂd·{ÏY§ÃZ©­\q÷‘,Ñ,_ø9o~á÷ÂÏóbo|àâ´1ÒÓÁÀ…(Ž }Ñà÷‘éñÇ_ƒßÁ¢ÌŸ²hD³0ØÑà·¿ùîk.—Ëoºò!D¿Ë¢E‰ÂÂBo {áÂ… ·ó]ðËd2=UDD ëܹs,B 6oÖ3cÖl6£©©)âÙ>‡Ã/ŒM q@­ž2T}låJüª°0 ³öÅ––°²)ä.„¹¹A$e ãŽ;†ŽËWá°"YQ„q‹2‡œV£ßÿÇ,¡¬½›,ñOÇ[`÷¼/¿ÀhØ×@/G›Ä¸êêjXnÁq{˜Ÿš˜®Iþ.ßÿÆÿ §È&—Ëq×]wá믿F?.\¸€‚‚‚€íRSS‘›› “ÉøóÑc8™ûŽììläää ££#àyQ$y½½ÝofìÑâb¬ÍÈùúÏmØ€µR)vŽk0ÙlxK¯_ðy²Ë<# kçN,Z½:àr²1Îh4ÂØù¥ûEÊÐ 2Çœý½ÓÚ~äÖ5$ç}“…##â„ï8“`Ozï©ÎNïéçUªi…±[²³±C¡À[£Ï½?·DÞ3²D³ HN™Ööì>$Š3I¢ÀUÞ';¦$‰X3¢™þM1XõëšeU¦3¶ eô£ŠÁ>@DDóK¯×ãÕW_Ecc#÷BS~áŸ^¯GGG¶nÝ ‰D¢Í1›Í†úúzÔ××C¯×û]&“É ÑhPZZ‘µ÷ OŸ[¿~Æß‡,Q ó XCyŸ°ˆ/üˆâ‰@œåýuHÇ.0D4óý-9HIú&Üçüæ8óMÒ¨êØß…½Æ-,N‡òòr¿óôz=ÊËËqðàAo(ÛÔÔ„ÚÚZH$8p 
";5£QWWªªª¼a¸¯­[·B£Ñ@­VGÅ}É‹‘&šyÃJn„ý\Ž‘h6;PºÜýâþ‹†øþë}!$Bâ7Ö³hDq$Y~_Ðó=‹ ù†C‰2ˆf+I¶Á{Ú3Äw,ˆo8—Ä}.*ù°uÉ[8ƒˆˆFmm­÷´J¥ÂÖ­[½ÿß¿¿÷´ç£ó6› UUU0›Í,Þ ™ÍfÔÎKõ,Üå!‹QVV†“'O¢ºº:jÂX~sdgÂ4n Àz>>_YY‰ƒB,Ãf³ù¹Z­ëëë±gÏlß¾Z­Öïr•J…½{÷¢±±eeeÈÎÎŽšû–+p²³ U}Çl‰ÀûÏ@–h–—.‡pÃSîY‘Á$‰”÷M$fò…Q<®yÂÊ#gA´á)¾aC4GDŸ‚ }âIAº¢ O±PQÊ3¶€wlA0Ê^I™ˆˆÆ/õÌ(õ=&ûŽ*ðP©T¨®®8Ÿ&f6›±ÿ~lß¾ûöíó›+‹QZZŠ“'OâàÁƒ~¿ƒhò܆±O<½<ðOWŸÃáwÝ>ãŒ"gÈÍÄÌ|¤HžÅеspÚ»á²!H—#Aœ…$ÙzÎ…$Šc‚äˆ6~#=¹Ù§­®áR– )3Ÿ]zDs½Ï¥,AÊæR wµa¤§N{7÷Ü÷ÄÌ|¿± ü÷ 6GÖwlïØ "¢hTXX…B55H‚Û¬S¥RA§ÓyGøž¸;>ÍfsTur†C}}}@7¬B¡@ii)ÔjuL,¶C¡Àëmm0ÙíxË`À™ ;¦ñFkŸÃ]õõÞ‘Ï«T9O–,Ѿ*ŠX" *13ŸòD ù$W¶ák ¾ì;wššê·o Ë…½ˆ(ÚI¥RH¥RDúç©<áªÝn¸Ì³à¢§‹Ö—D"ÍfCWWÙ‰Åb¨Õj”––ÆÜ§BÒD"Ô>öÊN‚ÉnGUSN]½Šç7oÆÚŒŒ ¯×çpàÔÕ«x±¹ÙÆîP(ð|˜ŒÐ  ÏdBúâÅ3îÈQTÞ» ÷¯ãÁ¼4ƒˆxŒ £I„÷îÂSRäç,·Ÿ#—Ë‘›› “Éä[à;[Ö^ø.œè´w#Ág¾0÷7¢ù¡P( ×ëº]e2 ««+à:¶_|)’Èd2TTT@£ÑÄD7l0oéõ0Ùíx,/¯·µNuvâTg'r%äŠÅX›‘4¡pÖlFßà ßÌX“͆]ã:ŠƒùÕCMöÎÄ ‡àÍ7‘ŸŸªª*¿ËÈQTK\º9éP*¥,ñAF‚¤$.]yž òe©óú³ qüøqÈŽç´1%îoD ¡´´û÷ïÇ«¯¾Š½{÷zÏW*•‹Å0›Í°ÙlÞ0Ñl6{·.æD‹Ö¹°Óñ–Á€³A‚{À°šl¶ //Ôí<µ ‰,E•‚‚o ÛÚÚtlï¨ç­k€l= GD4Ï4ßœÓŠŠ oøªR©ÐÔÔNµZ ›Í†ýû÷p‡±\qjZ­õõõsöý *++YØ0` ãJJJÐqýN\° A®"â1‚(œ***ðÑ•>|Üig1ˆfA*•zÇÁ»dÿöoÿo¾ù&Ài¿Á¢-½^¥R N­V ­VëíŽõŒ+¨©©Á«¯¾ ½^~Z­fñBÐÕÕNÓ÷q­tá?Ùæ°ÈÆ8¹\Ž¡‰W³DÄcQ˜)•J|9hAb¯•Š𥩯<úè£Þ@ÖeïaÁˆˆÀž={Îó ^ÿ1»4§C&“yO› žÅÖ"ɯ¦C+ÈQÔ elAZÆ7Ðwó+ÀÈ­kH\ºœ…#Ša­—{°iU& B Åb1T*UL/P5×4M\Ì‘ d‰ˆˆˆˆ(ê„2¶@šç d dç…Ój 8/!]ÎÂм»z£o4\§/áj·-àòM«2ðô¶Õسm5ÒÅ"lìÝ»—!QÈQTšjlAzfŽ÷´ÓÞ÷õrÚ»a‡û´­ñœ¾×ÐÀ„ÛÎÏ+QÄþóëÉ)H,sÿ'Q46ß>ȶ¿¬vÊÿå4´g¯Lº]ëå›h½|?{í ž)ZÿçÿøVÔ³ÍÍÍhiiÁ€£_‘ŠaìÂ1›Íèêê‚L&Cvv6 eÈQTšjlÁ’L™÷t°NÎXâ´á€ëv_À:ïÁêL ;‚þNFz:¦x;Îz\ÁâL÷ivæÆ¼ÖË=øÎ ‚õöØczÉb!6æe6æeàÓ+7´Í)ýãéKx¿í:Žþò;Q9ÒÀb±À`0ØçOz½/¾ø¢ßl^FƒŠŠ ¿ÑUUUP«Õ(..fÑ"Y"ŠjŽóu8£OÂÅSÉODÀ¤, % ’,RÒGÿ]Õ¿÷HÙßÂÍjwø…±ó2ðãí¡ypUÐÎW«ÝíG—ñëºq­Û†«Ý6ìzé]´¼\Â󨼼|Æ×=xð 8³ÙŒýèG°ÙüÇthµZ˜Ífo õz=ÑØØ­V‹DÍœÞ]Z-Îvu-èÏê_YY‰ÆÆFo§¬Z­Žøû¶C¡˜öø€³f3úñ¹ÅÿsqÏ­_4ÑÔo¾äÎCP½ À3²vîÄ¢Õ«.g ãŒF#Œ_ºŸø[9•ˆˆÇ¢…åÛBDs§  ÀÈ677£¨¨(èþæí” §½#]méù"¤™¯wIÜÝ®ò,w÷ëX7©€¿ðIÈGß[v×Ë¿Vz#`êq‡¶úk® c\ö Û{£» ÍÐ&Ê6ðë0Òž½ À=7vŲ™I‹ðíõ9¨ÿè ´g/3 3¥R‰êêjTVV¢©© ?ûÙÏpòäI&D …Âïÿ2™{Ë`##Ôj5êêê`0¢#U*§¥Ñ€ºÏáÀ‹--xkô9ÀÙ®.-.)”]h d‰ˆˆˆˆ(ªùŽ-0™L°Œë( pôFn]CâÒ…Y§|¤§Ãýu³vLºmn& È a¥i ^çšRîl[úÜ!­ñ† S𑞀vب’DHÌÈGb¦û‹NÛ÷>­Ù’7«ï³iUê?ºÂ‚F•J…¦¦&˜Íf¿ù§œX,à~“ß·VJŸÓf³ù&ˆ§1i"¨ÕH ñz{;>·Xð²N‡_Ÿ/ÈQT 6¶ÀWâÒåùªàºÝÌc ëèðñc wµOÂæç¸ÃW…ÜwÎ+CØ…$M ×Xç®»g¡1ƒÑÝAÛq}ÜïcØ‘¯Ú1òU;)K˜y’ä÷C% ÅE3Ïb^KÏ®ËíÛës|ŒÚÍ,j„ðýˆ½§Ë“&¦V«ÑÔÔ„ÆÆÆ€ðZ¥RA§Ó„µz½>îêô«ÂBœêì„ÉnÇëííxnÆyK0 d‰ˆˆˆˆ(ê[à+!m™7uöݘ—Ÿ?òU;†®}<á6—öÂc_¾sh]½6ê0lÔ!!]ޤì HüÆzqž-Y, zþ_]'È@ IDATýò>h7ãÛë³ñß/=ÅBE‰D±XŒììldOsnh<Òh4¨­­E}}=Š‹‹ý:c t:ôz½7µÙlèèèˆËZíP*ñòè‡Sxnƈº} d‰ˆˆˆˆ(ê[àK°8Ó{Úw§Ùr `¤ë3 Ï {—Ä…‚{€ÂõȳÀFåãÊ »gÏ6·»pá øÍŸuZ´!¸ü!’ä÷!Q¶Ž‹Í“kݶY]ÿƒöë€åY\i>R”••¡´´6›ÅѾ}ûP^^ŽýèG¨¬¬ÄæÍ›‘ •J…ºº:èt:ìÞ½f³5550›Ýáñ6b‹OÇ5Y"""""¢y0~l/ß™±®^¸†f˜ w6cèÚÇc ‰ÜÞC륜!l,‘g%Û(Ù6Ö5Ûòü[C§1tå ’ó¾…$9gaΕåY\ë¶¡õòÍY}Ïõ7æe°¨ó$Ø¢R¡b‡ìÔôz=êêê ‘H`³Ù°oß¾€mšššpß}÷ù'“É8Ÿ7Â0%""""¢˜à;¶ÀårA  DKráêuwÎ:mÝ3^ØkäÖ5 uœMp—ÄÍ7(¸Û3–bú±6Ú9«)šÛÓç}F ;0ÔqÃÆsH^ó[D.–=¼>o4\BýGW`µ;.žþ,Ù÷Û®{ôzxCNTÝÿÂÂB( ܨ©Az„ßÖòòò_÷ܹs|°OÁ3?v:d28wµ2Ex×5Y"""""Š ¾c |ÃXHdaÄÈÞº6íÌ5<€á+bØèßý•Ÿã£÷ Pp»aã‘4ÍÊj æÏí‡.ï8×@/?9Š$¹ ÉùE,Ö,h¶äá†K€ßi[ñBéÓºþÕ}ø‡?À=‡öém«£ëq&•B*•‚ïõàX …ßü؉( ¨T*H$ñ7¦ã-ƒÁ{:M(\ðŸ? 
@ŸÉ„ôÅ‹!—Ëý.g KDQMxï.Ü¿RŒó¸º-ñAN ’,ïÝ…§ ¤ÈÏY–ÛššŠ‡z---—%Þµ#¦óÜ]®ÉÓø¾®á8Î×ùuÅz:b ×1ˆ%·Âu@á:NŸ´Íc³ÃFFn]ƒhãß@’3ûÛBÒlYå[𻓟âém«±bÙÔµ¼z£o4\ÂïN~ ëm÷/ä'Û7ͨÖBsðàÁ¶ÓétÐjµèêêÂîÝ»QQQÁâ… ¸¸eee,Ä$ú¼ØÒ‚³]]Þó¶„aÆ ‡àÍ7‘ŸŸªª*¿ËÈQTK\º9éP*¥,ñAF‚¤$.]yž òe©a»AÙñ2ïi×íž¿ŸÓÞ Çù:¿Y±ÛîuAS(àh ªh³{œÁ±Z¿í–µ÷ÀñéB´¹tNüŠ”ým!½Pz?Ê_i€õ¶kþÏ?†tßžlÅï´Ÿzÿÿô¶ÕÓé uN©J¥Bii)ÊËËqäÈlÞ¼jµšœB<ÌÙ}K¯‡ÉnŸÑu?·XpÖlFßà ßù;Šˆ»Ÿ d‰ˆˆˆˆ(f`Ñ¢Eèïï÷;_’A’®a0ì€ÓÞqÖ¤ßË5<€ÁÏÿËÆ.Ï>Ž' )IÓ€¿{R€ _¿ÏÝ-ë²÷Àq¾nÎBÙxóLѼqú>h7‡|‡7äàwÚO±ºÒ‡;í,Ñ<òí’MÈÈ÷ž/H—Ãuó î…½¦ d?=áS°HTîäüsIÓy,Þü_ ÔwÿßeïÁÐÅw!Üð‹3Ç~ùJ^zgÂË7æeøý_³eZ^Þ‰M«2Y¼åé¨5›Í0›ÍqÑ:µµµ¨­­Ñu<rs¬xnýz<·aCDÞ6²1N.—c@(EâÕÅ,ñAfJ¥_ZØke1ˆæ‘o ë²w{ÏOgÁé d¿dë'ü#·®Ái5`K³<öËï?zoô±ÕÓ‘›Hôy³€B“.á¿_š^˜Í06²Ùl6ïé®®.²4k¹b1¶dgc‡B–Ù±¡b KDDDDD1Åwlk ×;ž aér ³¼aëD†¯|è=]´™a,ÍNá:ÀÒ ÔŽ7¾vŽ,€ššïiEÎùŒ4*•jÒE½Ìf3ººº¼ÿ@ii)ÔjuÔÔ÷¨F¿K²DDDDDs|»d‡ÍŸB¨xÔo d‰æˆk C—?t,Îj„@œ…q’ä*$H–±@Dqn¤§#_µÃië†kxÀýÑÙÌ|$}c=É\i™h® Ïa¤§ÎÑù¡ â,$Ê6 i’™¡[ ,LÁÐà€ÿØñ2ŒŒ²®Û=@°@öëËc§Á0–æŽ$Õ…~Çècjð B!±X,0 €å±¶OH$8|ø0gÇÎßnZÏŠ d‰æâE_W†:€a‡÷<—½#önŒ|ÕŽDù}æoc¡æã|Îè“pñT2*++YŠ8®¡ ]z#=þ/ö­F8­FŒtµC¸æ;|ã†Çš«}n ŽOOø-ää¿ÏµA¸ö R–°XsÌiïÆá4Ž]"Õ ”””„ý6}cÕZ/w?_óŒ-Xº#_µF¾¾Š¤ÜÀ~º·½§oõ¹†²4Gz|Ött:ìHŒ¡ým>=]´oÈ™r»ÖË7QÿÑ÷u¶­ÆÓE«ù [`çÎ i;½^ºº:Ô××£¦¦`ñæAcc£÷´X,fA"Y¢Y¹u Cß|ã9 §¤!I~ 6×/þ¬FX¬€…¥ 5xñïŠÞÁ¸ìÝp|r)•³S–ÇšÁÂØñ ǧ'òÀ³,Ös ¹ç®­@JrBDÜ&™O ë[ÊYp±÷ô×6î ©`Ø¡Kï²`D³4tåàa¬ßþ÷!CW>dÁâá…誵€È=£Ó5Ћ‘›Hû²½p ôy•äß·rú]8Á^ìAèœä#”D ýÑü u,ïøŽŠ#£c 0ìp-HqäÖµI¯*ýT¹'”½ðËI¡»ððo;Y“…`µ;ðA»™…ˆp6› 555Þÿ+ eÈd2¨T*.êaØ!K4 ‚$ÑÔÛøtè°ó(ÎŽâ¬)»ð|A4s ’eA÷+_~³I"R–°pq"1óŒ˜Üƒ`Gº;¬Ø6ö¸í”HÁ=À9=Ðïp‡²ÿö' h3Püú¢‰Yú€ãöÌuwr3ap¹‹õ™oÿðïg¼§WdqnæB*//i;»Ý½^ïý¿J¥âŒÓ”••…´ø™^¯GSSŽ9»ÝŽŠŠ Žƒˆ0 d‰fóâ/]>ñ ½`Û3l!Нc„$ #¡~,:IA2_ÙÍjŸË¸Ço!½éü §Ñ“½q,½ÙáÚ'Ü]³÷˜ §½{ÂçiK/ý¨>˜zÜç>ïÚ4…ÀCëX_òwú< mv¡ß1öº 7ø»'ÿx‡õ™‰?ž¾ˆkݶ)·»ÖmÃûm×qutÛyX±,\@3™oªP(pàÀo)•J(•J( TUUáÅ_ÄáÇY˜HznÂÍâ…ŸdsU1¹ÿ茟é· Hº‰™ù,QI¾g›{±.ŸÑžãÂø7p„kž`Áˆf»Ïå}Ÿ@v²7¹ Wœ=gg°ž±’,8=¬Õ8éç©)@åNwÈÖð‰ûqdé~ÿÐܩܴßZ>þGç‚©GOW,l»×…’mhÞ8}iÚ#–,âàO·±x L¥R…¼­B¡€B¡€F£aáæ‰Z­†B¡€^¯Gcc#Ôj5‹!ÈÍÁ‹?§ÕèýXrÐ~I"ˆ6<ÅbÅAr „kžÀ`Û‰€ã‚ß8Ùz¾aC4$Ëœ¿ C û™ßßîüm~#(>Œ[x× oGµ³ïÆ”×OMJ¶ Püþ]¾¶¹_“ûKšæî˜Ýt7GÄ“;@Ã'î ÖÝ;vܹKâ³ß@)g»6æe`c^^(½ŸÝ±apðàA!ÂÈd2  ²„,Ñ, ’S ºw†®|èí”õ{q˜q„Š"~™(n€|ˆîÿ>/¾8O6Iáš'ÆÍå“[ù}ˆ³0xñ` Ïÿ”4÷>Ç™îñùØ7¶ Q¶Þ{™Ó~#ä¿)@Ûìþhzÿè‡ <³‹D.Ü <´^%'cĬ _:\hùܶޅ®‹Dîp¾h3ƒØ¹òß/ÅwsKaa! 
nÔÔ š&»ÝÎ"Dâó–€höÉ)*ŠàÊû&œönïÇÞ$Y\0„ˆ Y†”ž…ÓvN{70ì€@œÅPˆhž$.]ŽE…?Ûç€Ñ¿ËìŠëcñ¸±ñ'cïkx‚¤Ðß@wn@ËçÀÿœë˜íwÐò¹û|iš{”ÁCë9—ˆzÆn÷X‚ _¸xßpwÄ® h3»¤inI¥RH¥RðaEÓUWWçë+‹Y4 @ŸÉ„ôÅ‹!—û¿KË@–h ’S¸t9C–$¼wî_)ƃyü8EA YÆ@ˆÇâ>Ãõ΂ðÞ]xª@Šüœ¥yÇ-ˆ3á²»WêrÚº§ý.5ÅÊm ù3à´nlá/ÀÚ>ïþ’¦Š\ òœ7E<°“`4„ ”›éž#\¸NÀým}Ð~½·h½|+²Ò°b™Vf ],â—b–V«E}}}HÛŽ_`³zÖ ‡àÍ7‘ŸŸªª*¿ËÈQTK\º9éP*¥,ñAF‚$÷Óò<äËR#ò6&/`,ýª‰Ù›0â do]›Õ›ê…ëÜ_–>wÇlËçï8À}~ËçîîYPäÊå€Bäf°£2ÜL7ƒÐ_sÏ}tl{—Ä…‚{€Gï@šÆým¡XíüÿŸöìXo;‚nóð†ür×ýxxCÔsººº‚Ö©ˆÅbTVVB"‘°€„,ÅAJš_W,\.ïeÎñs¾gHšæ^ü«dÛhwåèWÿ¸ìȳ˜Gn¦ J¹ ¹{¼”ýóÎÒç_Ý€Þè‚©gò×E"÷ ÷çÃ.´ÖË=ØõÒ»¸Úm›t»÷Û®ãý¶ë8øÓmx¦h G1E&“A¥RM¹ÙlFWW`ß¾}\Ì+1q%%%è¸~ '.X áà*"â1‚(œ***ðÑ•>|ÜÉňµ¿%eoÄá4Àåû ºÓjœóÛà ïwè×ÜîÂ…/à7ëËÔ#€©Ç=Þq _&€<ËÝQK³c0¹ÆÁFX=°…ëœFV»?ú—oûíõÙxºh5Vd½sÑ{ÛíÙ+Ð~t½·QþJ6æe`ÓªLb†F£ yô€N§Cuu5ªªªpðàÁ‚\Z8 dcœ\.Ç€PŠÄ«‹Y "â1‚(Ì”J%¾´ ±×Êb…iKÌÈ÷²Î¯;$!0< ;àèƒ e~ZSåYc³–¾Ñ®Ìkî`Ðwî¬Çø°Hä‚<Ó ¦¦¸ÇHÓØMëËØ ôº;_ï Œ°=.ô;|׉»[s3ÝA¸r¹`´S™°‘à†Kh½|&í|ÕlY…ÖËñø o£÷ö þáß?Ä{¿~’¤¸¤R©°wï^ìÙ³UUU8yò$ÇD²DDDDD7Æ$/v²Fn]C’lý¼ßOˆêùØûÀØã™]êBÇõà!`¿C0êÀ#7Ó…T‘Êå£ßÉèù16ŸÖÒog«¥×}Z ¸ã˜läÀÄ¡j~Ž;|UÈ¥|êí)<7\¼Pzÿ”c6­Êı_>Ç_xï·]ÇÕ}X±ŒïZP|R*•P©TÐéthlläÂ^„,Å׋ Ÿ±à™$ë´},@ ;^jŠ; TÊM¡; 4v»ƒWý5,}Á»h}yÂÈ`a­‡'´àí²uŸÚÎ÷ˆÏZ¾<+Ü´ºï7àÂNlê57¦°¹AÀð5xºcŸÞ¶:¤íÞƒyøôÊM\í¶1¥¸&‹À;S–"ä¹K@DDDDDñÄwlëÎ-ïùN[wÄÜFy–û«hóX`hénöz>Žï‚±Û}^°™´ÁøvN܆Ê3Ba¼À31³ëß%qAšæ®]Fº¹™@Æß± `£Ùt‚Õ%‹…1s¿›››ÑÒÒ‚G¿ˆBÕÑÑÁ"D ²DDDDDWÆŒrõš"úv{F¸?Zï,ê@¿ctfj· wÜç›n ÐÛ㡤ÂóVƒE" 7ÃÝÓœšȳܳ^‰‚×…bËtÆôÝŒ™ûm±X`0Ë#ü¶Þwß}súýÊÊÊPVVÆÿ èõz=zf³ “ÉX”Â@–ˆ¢šã|Îè“pñT2*++Y"â1‚(Lœön NãØe!òW­@IIId¿ò[€„$À9 À=GÖå¸ p¹\0v púƒ\À?Xõð¬žÓ¹™£ç‹<ã<ºFÛþ6[žño4\ ¥L¹ýûm×½cVdq#еµµ¨­­öõÄb1Ôj5 IÏCX"Šê'£V#,VÀÂRDaå€Ój„Ñ ¤$'Düíõ[ç0\.w88Øú\#C@€Ö/Ý FùŽˆV¾¡æØVÀLNÏbd?'3ԅĬÆËþ6[?Þ¾å¯4à×ucy–dÒ…½Z/÷`×Kï¾½>›ócÃD&“!;;;èef³]]]‹ÅP*•A·Ñét,âþ.8‰„oND²DDDDDw\Cý@ò"¸ï@ @  Ãc§Ge,apŒg12¢ù¦yp~—÷)>½rå¯4àpƒšó°qU†w›ÞÛhÏ^ÁO_àž!ûO?ü‹®ß™F3á¨O—§R©ÄÁƒƒn3×£b…J¥šÖ•J•JÅÂ…A ¹Ëan.äòÀ?– dcܱcÇÐqù*ÖA$+Š ÎbQˆˆÇ¢0©®®†åö·‡!Ú\Ê‚…a¼øFºÚ½ÿ¾#eƒʼn .@*• ôz=RSSƒ^6ßÒÅ"üé6”¼ô.®uÛð~Ûu¼ßv}Âí=aì¦U™S[‹Å£Ñˆ‚‚>ÐbÀ ×Ü¿À?—kôXàY;wbÑêÕ—3qF£ÆÎ/¸?ÖBDÄcQøxä ¢…ß߆»Ú0lÔÁi»1aë[øµr¾_Iq ¹¹‡B~~>ªªª.×jµ0™L¨¬¬ K(»iU&ξ\‚Ÿ¿vÚ.£÷vðE»ŠÌà ¥÷GTk4Q]]þþþ ;B)ºèœð•Ë€Ÿ¢ éc KDDDDD1O².{7\.—7põ==QP»HèçR,ó„±SéïïGuuuØBÙt±µÏ(òvÈ~zå&–,bEV6æe ],ЍÚú†±{Ú€¡,ÍY"""""Šy‰K—#9†:üÎУ\ÎÚ» |õ„²¾§‰b™o»sçN…ÂI· …عs'Äb1ª««a4Ãr»?h¿Ž_×ý?í ámüüµ3øuÝ_PÿÑ利­o»sçNdeqöI,züñÇÝ¡¬@-ÿvÐtž“°DDDDD/’ïÙ§­.{÷”±p—„ã (6cSRRBº^JJ vî܉ãÇ/x§¬öìeüüµ3¸Úmó;ßw¯ôÅ"¼Pz?~¼}SØj,Œýâ‹/ø ‹Q?þ8°S–¦…²DDDDD7É)m| Hù…±³Éla¡4Ó0ÖÃÊ.d§ìO_DÉKKûwõZo;ð³×Πü•Óa©-;cã;eiºøô‚ˆˆˆˆˆâŠ e „žÂà'GýÎ÷tÌú†³K³Ó‰bËlÃX…씽z£寸ç?/Ï’à…Òû¡ypUÀ^Ú³—ñÛ“­ø ÝŒ?ž¾„o­ÏÆ3Ek¬¶óÆB¡PàFM ÒùŽX씥é` KDDDDDq'Ø"_¾³d=¡ì"GPì%Œíïï‡Á`zþx Êþºîcî0öìË%A¬‡fË*h¶¬BÙ˧ñFÃ%¼T÷ñ‚²¡†±Áj @€Þѯ…°dô+ÚÜà˜çŸÑërAW†²ä1 @ŸÉ„ôÅ‹Ž d‰(ª ïÝ…ûWŠñ`^‹AD¦Ó;Qm#Õ/£l߸àßâM´2”%Ïcñ0¼ù&òóóQUUåÿ„%"¢h–¸t92rÒ¡TJY "â1‚(ŒI)H\ºò<äËR£æv{ùrÚn#›šù/¤/\¼‰ü¢ .Z.KOâùïoÀÞŸ¨¢ö±¥~¦{¢Â¾¿W…´íÖdhü£†û›PÃØmÛ¶Áᘸ·pÉ’àý’óÊZo»oÓ·×ç„´}ºX„o¯ÏÆífôÞœ×ßE¨aìæÍ›lñ³¹ðÙgŸá³Ï>›—ï]VV†²²²yùÞ£ÿ>òÈ#ÈÌœß ^$šøÍù eÍf3ºººæì>ˆÅb(•J>‰ ²DDDDD·<‹|õôp ;BÙÔo.}û:ñƒ_4ÂÚ7ˆŸ~o=ž|t% ÖHÑy݆ -Ø÷[öý«·zxù…˜ÿ}n}@†‚Õ|Î×tfÆÎfæéBΔÓéŒMII‰ªzÌt¡¶ƒd2Ù¬~þ¹sçfuýÌḬ̀×{>BÙúúzÔÖÖÎÙmT©TÞß-,²1®¤¤×oáÄ $\Ý‘ˆxŒ §ŠŠ |t¥wÚY ¢Úß)K²¹ù½ßùîy²‘Û!kísà¿h„Ë|òöwý>¦_&BÁš <ùèJ¨Ÿ©Ç+hÇóßßÒÇþ£Y¬wÆN×\-àªù e—gIp­ÛíG—Cš kµ;ÐvÅ2¯÷u¾ðŠv*•ŠEðÁñ4²1N.—c@(EâÕÅ,ñAfJ¥_ZØke1ˆ"lK,ƒpíüü £ "ÑïO`íÄÞŸ¨&œ™šž&¾¿WáÙÿ݈ —,Þ@¶Ódá· øþ“ À¡· è¼nÃÊ ¾ÿ¤"hpk퓸áH IDATsàÐ :¯»Cî‚5R|ÿ)EПkísàO§¯zÇ(L´m¨ÛÿùéiBüuÑŠ€û½ÿ·:¬ÈãÙïò#¸ ÆzÌG(ûðúï"]ó2& kµ;Pþ/§a½íÀ’ÅB<¼!gÎï#ÃØ¹£×ëqôèQ466âÏþsLÞǹ eU*ÕŒÇ=Øl6Ô××Ãf³yÏ‹Å|† Y""""""I² ùú œ7.EÅí}åP;àÙ L'] ë¹gýÎë¼nþÕáŠÉ†C' X""=Mˆ«×íxùPþü‡b¿°ó÷ÿ¯ÿë7-°ö bÓj)¬6^>Ô†—µá?~³ÕoÛ oâ‘ïÕÃÚ7ˆ9b¤KDÞmÿü‡bï"c¡n­—,È+ªón{õºûþU‡ÿøÍV¿ðuß¿ê°õYܲá 
c=æ:”}¡ô~h?ºŒ«Ý6|ç…?A³%{¶­[{²/Õ}ì9û“í›æü¾1Œ=O0X__½^÷y®BY•J5£.dN‡ýû÷û…±ó9Ë—¦–À¹‰Öm…Qq[;¯Û°"G<«1‡NpâwÁzîYt6ìÆüf+¬}ƒØ÷¯º±Ÿc²á¿h‰Ÿ¼ý]\øÓßx·½pÑ‚ü¢Éï{>õãÿ.ðç?£³a7.üéo¼Û>ÿRË´·ܳr¿ÿ¤.}:vã“·¿ ØÿÛó|ÐŽî0ÖÃÊŠÅbTWWÏx&)¬X–†c¿|K a½íÀO_Âí×¶ë½íð†±?ÖlÄ ¥Ìé}b;;MMM¨ªªÂ#<‚êêj¿0vëÖ­1ÿüq¬[·m´.ׂüL›Í†šš”——Ãl6 >Ì06ÌØ!KDDDDDä#1»Ã×>ŠèÛØirw9­Ì‘½ìÐÛ†€óƒ}”ÿûO)ðä£+½ÿö»JüàM°ÚÆV¦ùP`߸ÑÏ~W‰Æ¿táÐ ÞþŸN<ùèJ¼ý?è¼nÃÞŸ¨ ~0;`Ût‰;ìu;M«¥~‹’¬ÉÀÖdhúK°>"%Œõ˜ËNÙ‡7äàìË%øuÝÇx£!xûò, žÞ¶O­žóQ cgÆl6ãèÑ£Ðjµ~Ý™€»Û³¸¸jµ‰$.ê±3e=]±ž `Wl$a K4‡\Cpڻᴑ.G¢%¤,aaˆà´Ý€ÓÞ×ðÄYH\ºœE!šG#·®Á5ÐHg"A²ŒE¡#ÿe’§+¶×'8õðŒ#/ØGùƒº›VKýþá’{¾k°1Ï>¥À¡\¸hÁ“®ô΂U?¸ÂúïÿoõØ÷ q»±Û8çPý@6Yz½>¢ÂXñ¡ì?þã?B*•Îè{­X–†Úç‹Pû|¬vGÀåÏ­ iѯér8ø·û7†±!²ÙlhjjB]]]ÀH™L†ÒÒR¨ÕjdggÇe}|CYÑ‹ÄcÍ¡‘[×0Øvö±.HY‚ä5ßá›!óu¬³wcÈpÇ. ‘¿jJJJ¢úþDËÂ^ž`ÓWÁ)þü‡b¿óù^}Ðë DÓÓ"odÃD‹–qÛß222°hÑ"ôõõ¡¯¯/bYÞÛä¹³õAûu¼ßv׺m¸ÚmÃÆ¼ ,Y,ĦU(~pÕœß~‘H©T “É„žž²“Ø¿?´Z­ßy2™ jµÅÅÅ   »ÛýºànÌa(Ë®ØèÁ@–h–\Cp|r40hñ¼(ìéÀÈ­kHy¨œ¡ì|<µa±–‚"ÔHO‡; fØÁ¶HüÆz×>ÁbñAs`ØxC Áÿfôbð“£HÎ߆$ù},Ö<<'rZ0Z”d.U±þºhþtúªw\€GzšÈo ÅÇþ&•JQYY‰êêj?~`Û?îÄŠÑàv²íš>î¦ÕwñAÍiÞ„ux€­MœÑhd¼9溯¸ük :þ 'Ž7öÞ[Ùˆ‰ Ç®}zäýè/ˆ‰ ò4Û‚Ig/ö ·Ï¶è×ë//EéU“ú¹O%`ËÝÅ»Ë;‚¼•‹ÐkDUM zû†ptÿ÷ÀÖ.á¿ÊÁ+?¯¶€Š£X­À{ofy´ÝdŸ+ÙØ“²3Õ¾`*“±ð‹C_°%cO¾·1âp·Û©ŸNúé¼w>»„_újÊù²'eÙ¾À=‰DµZ µZ “É„êêjèt:tttÀ`0À`0 ¬¬ ¹¹¹ÈÍÍENN$I@ÇÄÉXöŠõ¬%zV‡ÅBuðÿþð{c&c½ûÃï®u™q®ùÆ”¿FVÊz&!!Ðét(//ÇêÕ÷¬­­Eii)Ö¬Yƒ]»v¡®®. c0ÕÉXVÅVÈM£ 9Ñ Ѭú+î²Ê;y‡(³¿±NU³ü»LcÑ Þ{+Ûã헥ŢöµÛÛÆzœ­ë”غγ~„¹O%x´°˜'Ûõ<Ý=Ÿ±¶­¦»RÖÉXÀÖžYšèÑö1âp<³4_\0áæ­!¯¼VVÊNŒJ¥‚J¥Bqq1jkkqèÐ!F˜Ífèt:èt:œ>}: ^³7*c:äÔ+°U%k4šI=žB¡@qq1wЙ8Tdˆ&/Hìüaf¬¶cmODM$–Áz÷´hwóÃèë‚ç%1hD“o¡@D0Ð7æ˜=>‰ˆfƒ‰$eÿþ÷¿£¯¯oÌÇZ°`ÛÅiï%c}ÙD’²]]]hjjò›×ÖÖæÖ>£[:tµµµèèè˜Òßsá´··{5FáááxüñÇÝÞæ­¼ÜÑëõœèü²D (Fîtð7î`H8ûÇÍ2¡ò'04ªO¥ýtéÑóDPì# у~°OÇÈÝź[‚Œ¾ €“‰hVñ4){þüy\¿~}ÌÇILLĆ \®÷v26I&Ak—ºSÍõ„íµ âüÕži‰­§IÙ+W® ¡¡Á¯ö›(/?~BBŠ‹‹…ªÙ© \$Â×_=-1r—Îd,ùñçV†€hòD¡K{Cû_Nyî„&ÿ#O$še‚ç§"(öÜé¾"Ìn GXÚ Ñ MþGܾqYèß<Ö¢^"± ¡ÉÿÈ€ѬâiR655%%%.×ïÝ»ýýý.×OGeì³K…Eº2’c‘™2Ìm{-ƒ(üÇÑ{kÑ‘ax6=Ñë±H¥lyyù„_§Ó¡ººð=ÏúÁþf0ðÍ7ßÀd2!'' ã·&ÉÍÍ’ß  ØaoùÀ 7×{;Ë…»õ"z@Áó’šö¼­WäXÛ,P!DþƒE4 …¥½0nõ«H,Cøcl§[Ñ ÏX QŒ|ì1#GxÆZŠˆf¥©^èkºÚ¼•ÿ$¢#Ãp­ËŒçßú…¿=ŽÏÏ»Vòîûø}õèNÚÛ¶&sÚbëÍ…¾²³³QTT„M2ý`?ÓjµØ´i“¤^³fKSN‡M›6 ‰æ@ÀÊXš&d‰¦@H|:"–oEpüRá P$–!8~)Ÿ܂0Å ‰h–…F ‡ˆå[$‰c ˆ¦jÌED#âñ|„¦>gû›DDÙÆ\Úóˆx<Ÿg¬Ѭ6UIÙéì»0. 
ÿñ‹8~ _\pMÈÞ¼5(,öš:oå/ŸÖØz+)+•J¡T*±¶ P_¦Õj¡Õj]®¯¬¬tº¾££ƒ¥¥¥Øµk—ß«™NÆšÍfèõz Nr¾²Ohp¥½ÝmOf¶, šÂ@žr<ýÂÛ€'‰ñTrƒA>-x~*{VrŽ i"‚g§L³ ‰ amÀÚeR¤&Îc@ˆ|x¼Md¡/wfb¯Ì”ù8ùÞzüâÐW8ðÙ%·Û$É$ØüÜl^±dZZ¸3‘öèСC…B!œZ¯Õja4¡Õj‘ŸŸ‰D‚øøxÄÇÇ£££:999SÖ¶`ºÍT2V¯×ãСCÐëõ0›ÍN·%$$@¥RáÕW_½o»òŽNàÃݶ‚a…,ùµàyIˆMLR©d0ˆˆsÑ …D x^äÉŒ¹ ;ùÎx›l¥ìL$cíÆEAûÆ ôüšÛê×—V¤AûÆŠKÆÚy³}/3™LBbpïÞ½ÈÍÍEnn.ÊËË!‹Ø’ˆ V«¡Óé°zõj¶þh¦’±………¨­­uIÆÚß {[ˆ©Z0¦²DDDDDDD³Ð褬»ÄŽ#³Ù>0™LBU¬ãb^ö¤­?ˆ¸ûo¦8¶z¸¥Ré·=z²DDDDDD>èý*#r–Ç»TsîÚ§GΓñX”(ÁûUF´\7cÙ)¶¬U &*-íf|t¼-×-ˆ‰ Ãë//ELT¸ËãÛûª¶\7cQ¢9OÆ#÷©—ÛàÚu‹ð{·©=eBÝWÂclÉs­æ­8b;M6oå"üöOÐÛ7„¬XˆÜ§°kŸ ÄØºNéôx;·©œÛñõæn;{ß]ûãÚ½±%ÿöË”nS±J6@=›žíë+°þ—Ÿà_ÿÇghxo½ß¿¦úúz444`¶Óã3|ø¹&$$ ¨¨kÖ¬qZ`J©T¢££ÕÕÕP©Tèèè€F£`;ß“ŠOš¸û-ÖG3ƒ Y""""""söb7jOuà7of¹ÜVú;=~°b!ê¾ê€Õj»®âˆ¥ûô8ºïûX»í/°Z˜¨0\»nAÅ#þVµÎ)™YqÄ€W~n«˜ÊY£ÇZPú;=¶®Sà¿Ê´\7£ôwz§Ë;·©ûTzûñÊÏëPUÓrwQ.©í9üN?þ*Ç)ZqÔ–Ô}¿ÊˆÚS¶Þ’V«¹O% ôwzä,¿—ý²»öéÑrÝŒŠ#F,L£·oGŒ¨8jÄ_ÿ´Úéu¼òóZT1:=ûï‹–„¹$dsï¶^¨:Þ‚7¶¤sG Pê§S†sÍÝ8×|çªd'£§§G8UÝ×»bïÚµKhK`6›QVVæ²V«u¹®  €;®ì §ét:§–c1›ÍBul||<èC¸¨ùµÁ3‡pâèÜþ¡'"âA4}îXº0xæÿÿûpøðaäÕ~iK\ŽÕ_õ£ã×ðúËéè=½½§·âõ——¢·oÿôr5vnS¡÷ôV´|¶;·©lÉÍ»IJÀVQúÊÏë¹DŠo¿Ú‚ÚÔè=½;·©„¤*ä>•«Á–$ÉY«¡¥?¶%J§GUM‹í÷žÞŠÚÔøö«-È\"Å+?¯Cí)çžu_vàÛ›Cø[Õ:üõO«ï› =z¬«Z‡–Ï6¢÷ôVlY«ÀÙ‹=N¯£âˆGŒØ²V–Ïò…ç`µg/ºïÕ¹,-Ñ’0¡‚–ã-pe$Û'»ykˆÁ˜F:nÂ=aóóó¡V«<ØãT]]íÔîa,&“ b±¹¹¹  a…,ù÷‡ÑÞ6ôô= qŽ šQÖáÜémC[/ʺeO:¶p”¹D*$G{í2—H’¹Ëã± @oß½¤Ô{ýŽýßwª6-ý± U5-øíŸÎ;=öh½}ƒÂïzï­láú˜¨pTíÿ>’WBÅQ£Ës/ý± ËÒb=zýï½™å´í[–âý£F§×ñÛ?]@´$ ï½™%¼Ž˜¨p¼÷fþéåê1{Yšu_vp¼¸/.˜„`Oü9ö8³ç©B¡`«‚ ÈÍÍ…B¡€ÑhÄ®]»pæÌ¬^½Ú©ZÖl6ãÌ™3¨¬¬ª•7nÜÈ8û&d‰ˆˆˆˆˆüLLT˜óAúÝäç²4©Ûë}tüš­ÉŒk&çÞ‚É $8w©µ§Lc&ƒíÕ§=*EÝ—&·ÏÍ]ÂÓÞ.À‹öäìÙ‹ÝNÏ#gy¼K_Ù±ž7Í¿8ô¥p9:2Œ™F:މ?/Û»w/6nÜ‹ÅNwßJÙœœ¶„˜äV+Â,€\.w¹ Ùwøða\n¾†ÁÞ!„*V H,cPˆˆsÑ )++CÏ­a ÞAøãù ÇÛ˜+AÝÉ]î>é8:‘9Þãç¾Tý@ϱâˆGŒ?w rM8.fÏN?Ï'ù›»<u_vàìÅn+vif}pü"Z»<[˜è‹ &|~þ: I&ñûþ±þ†ÉXïKHH€N§CII‰PëŽX,ÆÆ™Œ!q^‰ {ñEÌY²Äåv&d\[[ÚZšØNk!"âA4sì rÇÛýŒ®€jËÒ¤ø[Õ??Ðcìܦ·µÁL:w©™côß­½[ÕËd¬ÿ8püÒ¤Zh__ÁàQ@’H$(//‡ÉdBmm-, Ìf3Ìf3 P( R©˜ ÷aLÈÍ"™K¤8{±½}ƒ.U«G ¸f²àõ——ŽYÑj¯Â­ûÊÀ5!»kŸ ÄØºNéõ×qî’ëëhi7߷˜WtdžYšˆ·òŸduì xâ‰'¦ôñ Xá9Ž„„lܸ‘ðCLÈù˜eK¤^;­>oå"œ»ÔƒûUþø«\áú³»ñÊÏë°0QŒÛÆ®|]´@‚œåñ¨=Õªšä­\$ÜöÞûçQú;ý¸÷Ÿ*olYŠW~^çô:zûñÊ›µãÞïÚu˘ճä›>ýåZh’ ¡×ëQ^^î´øÍ,&d‰ˆˆˆˆˆ|LÞÊEøíŸ. 
öËŽ)OÈ–þX…ªšT1¢öT¶®S ·oG ˆ–„¡jÿ÷¶_˜(FÝ—x,ïÿàõ——bë:%Þ{3 ¹/UcíkAÞÊEX–&Eí—&Ôžê@æ)ÞØ²Ôë1ÚºN‰Ú/;PqĈªšÛs¸ûûÇÒÒnFËu3^y)w2"/˜êjV&Ý3›Í·æ±X,\[ù0¶3‹ Y""""""“ûT¢%a¨=eÂ[ÒnËYE‰b—ûLäú³ý3*ŽPqÔˆßTœÇ¢D ~°bJ·©°hsÏÁªýßGéïôè5 ‹j-K‹ÅÙªFé>=Z®›ñ›ŠóX–&ÅÎm*¼±Å¹ÝÁ²q¤9Ëãn_”(FÎòx·=tGo ¿ÎEîòxÔ~ÙøÍ›‹ðÆ–tˆ”Z·¿ÏÞ?vë:w2"/`{ï³W¼NTYY™ÓÏ{÷îEnn.:C˜%"""""òAolIÇ®}z´´›’¤µ¨Ýn?Ñë·®SzÔçuYZ,ªþçs¹~Ñ *~}ÿƒù÷ÞÊó¶ÑÏm¼ç4zÛŠ#$/¸Ü§·oÐöü]³ùíŸ.Ø»\Ð˯œk¾¾þÉõ~fi¢m¿° â|K7 jnûË’_ª¬¬œT2ÖFÃ…¿f²DDDDDD>è-KñÞûçQqÔˆÒóÔÒÑj¿ìÀ+?¯ÃߪÖ9%XÿíW €ÜåñÎÛŸ2áìÅüõO«‚ Y""""""Ž÷ÞÌ¿ýªÁ¥ ÙÖU5-x,ïrŸ²%_[Ú-h¹nÆ–µ —JÛ]ûõøÁŠ…È}*Á#¿"•J!•Já'Ï×l6£ººµµµº_yy9ßl©ÕjÛ Ø“¶J¥’}c§Ñ€.}í툉Œ„\.wº Y"òkamÀ“‹Äx*9ŠÁ "ÎD3(H"CØc°v™©‰ó)²ug/ö ª¦Å£ö³É²´X´|–Š£¶E½ sÉCøÍ›YÈ[¹ÈiÛ–v3¬VxÔbãÍ÷¼ûÃïáæ­¡zŒŒäXüÇ/òÑ‘a@^b6›ñ¯ÿú¯0 †ÄÇÇOø> …­g¶X,f§Q'€ƒðá‡HMMEII‰ÓíLÈ‘_ ž—„ØÄ(•Rƒˆ8GÍ QH‚ç%AžyÜ\d ׃u¶‹‰ Ç[Ò]>mÑɘ½t9Þ|ßTô{‡ãÙôD/ÓjµB2V,óù)VZZŠÒÒÒ Ý§¸¸˜óALÈÑ«®®`«Ê,//ç‚QDc`B6À­_¿—¯‹£g{$‘1 DÄ9‚háÔÕ>|Õba0ˆ8ÞˆˆŠÁ`€ÙlìÝ»—ÉX/ÐjµÐjµ“ºoyy¹ÐCö‰'ž¨T*öî!LÈ8¹\Ž0)‚qðp IDAT¯E2DÄ9‚h†)•J4 õ øf/ƒAÄñFDP,–{_€%$pñ<¢ñ0!KDDDDDDDDÄ^} Ø÷b…¬wb\PP0©û:.fŒÉ,FSƒ Y""""""""z`999¨««Cmm-Ôj52ÅT*•Sâ{²&›Ô¥©Äù¦úúz”••á€F®ÅÅÅ‹ÅÐh40 |ó|€Á`€^¯úû’o`…,‘êééÑh$ùøsíèè@aa!ÊÊʰiÓ&äææB¡Pxt_Vmz‡F£^¯wZÔ‹f²Dä×Ï C.þ%ÅÅÅ qŽ š!w,]6Çáæ0¤¦,ÄúõëýöµˆæÄ—{úøÞÇÛL»ÖÙ‡…qQ|ãý@aa¡Óϵµµ¨­­õè¾LÈÒl„,ù÷‡ÑÞ6ôô= qŽ šQÖáÜémC[/êßÑD÷?m]V"¾Á4%Ú»9Þ&£à·ÇÑÚeƶ5™ØôÜĈù3Ѭ¤Õj¡Õj'u_Çd¹D"Z­FQQƒ:C˜%"""""r<ïÞIÁí7Dèé¤,Σdh¾¼—Ü’È” ¸ÖeÆOþý~òï' ~:/­X‚ÕO¥00>æôéÓ ‚0›Í¨¬¬„ÕjåYd^@nµ"lÁÈår—Û™ p‡ÆåækìB¨b‚Äü£ODœ#ˆfJYYzn cðÖÂÏg@ˆ|x¼?¼·¿¹ÐÕ[ÿ;cJæ³3Îû—($‚AñÐ³é‰øâ‚IøYwò*t'¯b¡LõÓ)xMÁ–4+¨Tª ·vÐétèèèÀêÕ«‘àtÝ¡C‡˜õ’8/‰D½ø"æ,Yâr;²®­­ m-Ml§µqŽ š9ö9ˆÈ÷Ç[HÒBB¶áï€BdÿãJ“Sñ‰g¯ˆœö/òÜ[ùËñš:ºSÍмŠêSWتf÷}|û>>‡gÓ±ù¹%XýT2[PÀR©T^˜K¯×£££jµZ¸¯B¡@II À`0@©T2¸ÓŒ Yš·o\Æí—a¸ ÀÖ³f/‘x>D!…F x~*‚baçÎÄ9bšXGp§û nÿW+Ç‚bl§É‰"¢mcnžœcΓ¸‰e^ð8n·ÛÊßÿÛõLÊÒDÕÿhøÚ!»(›g%MBŒ8/­HÃK+Òp­³ºSW±ïãshí2>?ŸŸ¿Ž˜Èp¨ŸNÆÔÈL™ÏÀ̓Á€Ý»wÃ`0”J%ŠŠŠœ‰ƒgΜÁêÕ«!‘H´iæø^X,d0!K“vÇÒ…‘«ÿ‰Û7.3äœ °Ü€õîeûþ!ÏGhÊ÷›ÊqŽ Îœ#¼àö·­¸Ý®ç˜#×ùXHÈ· ŸÁóS¼@åÔ+•\…)VbÀÜëÍv¶¤ìõ«³º¿þ º8îЪÀþ÷Ž̸(l[“‰›·ñ‹C_!:2 Ñ‘áhí2£÷Ö >8~ ¿„…2 ›fz½Þiñ(À–|-,,ÄÞ½{‘›› ¨««¨***‚Z­f𼤨¨‹ …B¸Îñ,Çëiú0!K?èÃpó á=‘Gûå†"(FŽää ç"ÎS䎥 ÃÆã¬‚¥ ±Ÿ¹#GØ£ÿDì½8–ðÌuÖ*• ååå ì `B–.œ#ˆs爉Œ¹óGzÅFGGãé§Ÿf"–<2wî\¨T*¤¥¥áäÉ“BÅìH›ÖáAVñwð¿ÁóÁ°ñ3áïÞwƒ"èêm­ ²µb…J9×kš5Î^ŽëmUÓŽ‰XÀVªxŽÃfXtd8Dúú‡póÖ_¾†ììl( tj4ˆñƒçë®õ€R©D]]ЪÀÎ^±i6›a2™Ø¶à>T* &ußøøxá²ý1¯£iþLÁÐýŒt\pJ´H¥Rdee!44”Á¡ feeáܹshnn`ë9lüŒœ#ˆ8Gxhèë?;%c“’’œVÊ%šÈ˜{æ™gÐØØ(TËÞþæ†#¢¸èÐ8D!{ôÜŽ_ŠáæBoYhøZ„†¯m•²+TÀ²GX5ˆzú€†¿õœ{Ä ûHôÛB•ì‡>£ªO5ãƒã— ;yÕéú$™ê§ý«¬T*…T*…¯§öíŸG,‹Ëmö…£“²“H$0›Íèèè`BÖƒOÅç¾É&uÉsºôµ·#&2r¹Üév&di\·¿muêMǃ>š ™™™ˆ‰‰Á™3gøà/ì± xr‘O%óh‡sqŽü9bô )))ÈÌä)—4y¡¡¡Â¼mOÊŽ´ÔC4'f§XId{lÖ.“"5q^ÀÇ.x^‚UqûÛVŒ´Æî+Âm=}Àÿþ«íŸb°,•ÉY×Óg«†­¿`Eû ‘Ûm¦3;ÛÆ›§Î5ßÀÿÔ5Bwò*zo9¯iðÌÒl^±/­Hc ¼H¡PÀ`0¸T»Ú+1;::\î3ºj–(t8~ˆÔÔT”””8Ý΄,É:2€á‹Ÿ?GGG###ƒ¡)±páBôöö Up#-õš—4á°Á󒛥RÊ rŽ Î=GXú\¾a2–¦ŠJ¥ÂÐÐÐ2døòqÏdB§Z‹B"ì×5Bw²Ùm5¬ú©¼¶&™)\Xv¦èõz(•Jèõzèt:èt:H$( ¡]F£F£qªŽeÒÐsf³›6mrŠŸ;&“ %%%P«ÕNÕÊä˜ pr¹aR_‹œÐýnówàî©VsæÌÁÂ… Lòйsç"%%E8-y¸ù‚UÎD9G(•J4 õ øfï„î7ÒzZ¸üðÃcþ|h’wÌŸ?R©===¶1×ú¥­ês·©$ ‰@HüR¡ïíî˸Ýu·¿m]Wƒ]A+ ¬Ç‰„-“´×Ów/ÛÖi…±]4j1®1’±áQ¶–²TVÂÎ0ÝÉfì×5âóó×]nK’IðVþ“P?•‚q8ƒ5à ]®3›ÍÐëïsŒN$* 3x*))b¨P(ŸŸ…BF½^¢¢"X,èt:ttt@§Ó!>>ž yù&dÉý_ç½þ#ii<̓¼ëÑG½·¢úÍvXGXuÀ9‚ˆs„ƒÛÝ—âAäM©©©BBöNwà§ Y_d¯œl}¡ïX:qû¿®áNo¬–.Û÷ô _‹Ððµóõ æ[í¤fËÀVmÜsó^òµû¦»Ê×±+aEâùŠ‘#ø¡…Çùmux Ú÷ñ9|qÁ9‰·ù¹%ؼb žMOd€|Œ§íâãã¡R©››+ô”¥ñét:!¹]PPà6ɪT*¡R©ŸŸÒÒRÔÕÕáСCÈÏÏgœ}²äÂ:2ëÍ{çN9®ŒHä ¡¡¡ˆŽŽ¶õ¬ƒí´dV!pŽ âasÇÒåT‘Í‚¼*>>¡¡¡†uà&îXº$æ TSM…àˆ¨{ Ú‘Ü1wáη­¸ým«ÓßÚÑì•´gGuA˜n…|¾Ò(+bcl‰G…üÞíþœ´ulí`l³ýßÝkEOŸm7¬ønÐ]¢uü6¢èž—„ yI’ÈXà’dl[“‰ÍÏ-a5¬Ú¹s'Ôj5á%öd¬J¥ºoÅ«D"Aii)ÔjµP¥ÌÖ¾ƒ Yr=ð3w —£££Ù£Ž¦íàÏžl¹Ýu™ YÎDœ#îºÝe.³UMç˜kmmµÍû½mLÈNQH„íôøyI°ÿe½ým+¬·nàNÿ·¶¿¿·nÀz÷ w¾ÝM\Þ¿ªcrV9ª5·cw,sÂl}nÇÒÖ|7tÿDZ'Wí ­·µß7j£þ+¶á@ä|Idš;A1rîÓ~æÙôDl[“ 
õÓ)³òõ×××£¡¡2îþóULÆz—½ïêÕ«=Ú^"‘‹©F&d}²äâη­NƉ¦Cllì½}ð»^„s爻¬#Â嘘î 4-æÎ{oúŽ™!Áó’7 Þþ¶Ö>X¿ëÅK¬7ݶ<SÅi{`ÄK½¢Ð‰eŠŠ‚ùXl€x+ù¬~ý===0m_ÐâýÄONŸ>ÍÝC9KQ©T¢®®ŽAó1LÈ‘_Üù®Ö[7„Dúm‡/RÇkƒàëDÑ \^¿(lD‘ó4'fÖõ{mãíƒãÑÚež²ÇK’IðÒ ®ƒàM&“I¨æô”ãÂ_žö¡­Ìfó„Þ ò=LÈ’Ûrvaaa M Ǫ/ë-Ï+<îô¶¡§èa9GçÎDSÆ©BvxÀ³¿Ã¸ÓÛ†¶^ "4ˆAœö¾´ŽUµc5rLÔ:žýØ’}÷}ßoŽ[•+Ï‚Çïñi¯fuäðÜYÝÊñfwàø%—E½Ä3K˜õƒÁ€Ÿüä'“J —% >þøc.B5Šý ÅC‡yÔ~Àd2 Õ±999  aB–\^Ne%M§>¤ãôF#ÎÄ9‚sу fⓈ¼`÷îÝSR‘i6›QVV†ÒÒRÕAnn.ª««¡×ë±k×.™´ÖëõÐh40›ÍP(¬8žfäV+Â,€\îÚœ Ùwøða\n¾†ÁÞ!„*V°y=qŽ šAeeeè¹5ŒÁ[#<Ÿ!âx#"¼ûÃïáæ­±W‰;pü|v ð¿Ès»Ÿþû 4^íf@½È`0T* <º½2¶¼¼œ¼ÜÜ\¨T*èõzèt:ÔÖÖâ÷¿ÿ½S²U«ÕÂb±ï…X,ÆÎ;¼ià%‘²_Äœ%K\ngB6Àµµµ¡­¥ €ç§›ç"òû‚DÄñFDžËL™?îí_\¸.\~6=qÌí¢#Ùnkº@¥RMè>Ý~¶Ú»w/ a4a6›]*_G÷â-++cu¬bB–ˆˆˆˆˆˆˆˆ¦„X,†X,öx{&b'F"‘ ²²:Z­v̘æää@­V³¯bB–ˆˆˆˆˆˆˆˆØéÓ§'|¶*˜µZ µZÍ8ú)&d‰ˆˆˆˆˆˆˆhJ™Ífa*³Ù,\/‘H„ ΄„Šf%&d‰ˆˆˆˆˆˆˆhÊhµÚ1O§€ÚÚZhµZäçç{¼ø&“ “º¯B¡`û„,ù­èÈð mŸ‘Ë yÑ®]» Ó鄟Åb±Ó¢Rƒ‹f³Z­عs'çêêêqÝã)//g¿^„,ù-Çë¹æÈL™ïv»¾þ!@Œ8ܯ^_vv6 :5ÄøøsÕëõB2V¡P ¸¸ØmP¯×£¬¬ F£:999ÈÍÍåÎL³²DDDDDDDä·Êî†]}êªÛ„ìµÎ>œkî0ñŠÚ™&•J!•JáÏU£ÑT*Õ¸‹L©T*TVV¢°°z½øÃ˜õÀêÕ«Ç­r5›Í00™L¨­­…ÅbÁÆñꫯ²]Á4Р¯½1‘‘ËåN·3!KD~-ì± xr‘O%G1DÄ9‚hId{lÖ.“"5qBÄñ6mÆE!I&Ak—û?nDFr,ÔO§·_ëìÆ_}"üüÌR.$å-ƒ$ˆ! """"""¢@Ãdìô[½z5[°¤¤&“Éív&“ %%%Bg~~>ƒç‰jµeee †a…,=0µZÚÚZÔÕÕ¡¶¶µµµP*•‹ÅP©T00›ÍÂ`€-‰ËêMšm˜%"""""""Ÿu®ùúú‡¦ìñ¢æ†ùUõl}}=0 ãî?_VZZв²2TWW 8%aíV¯^ÒÒRîä4ë0!KDDDDDDD>ë§ÿ~_\0MÙã=³4Ÿþr­ß¼þžžF@’<_‰D‚ÒÒRäç磺ºF£ƒ‹m‘E…B•J…Õ«W Í6LÈ‘_³4+1!KDDDDDDDD¬¼¼Ü£íôz=t:::: T*ñꫯ2x4«0!KDDDDDDDDL¥Ry¼]~~>6n܈ÊÊJ “¹D€ËŸÒ¬°}ûvlß¾~ñ|;;;~>vì¶oߎŒyŸ¦¦&l߾ǎãNÄ9‚sMË~X^^ŽmÛ¶áùçŸÇ¿üË¿`ûöí¨©©™’1éxPfßÇ›šš¼þº,‹Ë› ö1¨Ñh&ôÚ‰ˆˆ•D"Z­`«˜5 ÍLÈRÀ;vìÑÔÔ䉈ݻw£¬¬Ìå ·±±3atëÖ-466zå ’ˆsç¢ÑãfëÖ­¨ªª‚ÕjÅÊ•+‘œœŒ¦¦&”••aÛ¶m°X,“~|û˜½ßºuË«¯«¾¾¯¼òŠWƉ} ;v GõøµQàaB–^CCd2²²²PSSó@ˆÓõ|Ç£Ñh|þ5qŽàA«¾¾)))سgöïßââb¼û¨¨ÀÊ•+ÑÔÔtßJP_ÔÜÜ<-ã§²²’_ŽBU,÷¢Ù† Y hhhh@FF²²²Àã ¸‰Môàm²{éééèììÄÁƒùæqŽàA3â7¿ù `ÇŽÈÈÈpºM,£¸¸2™ ã&§:ñéÍ19•¿'%%‹Å/ÖDD43²³³QTT„M2쵕••aïÞ½(..æM³ õ¢€fO¬dee!;;‘‘‘øè£°víZ·Û755A«Õ §üÆÅÅ¡  ï¼óÒÓÓñî»ï:diµZ§äMFF °xñbáºÆÆFlß¾o¿ý6Nž<é´}^^ ¶€óçÏãùçŸÇ¦M›°yófaûÍ›7£¼¼UUUÈÊÊr9&"Μ#È›êëëa±X°råJÄÅŹý *22ÒåþZ­VHÔŠÅbäååá?øÄbñ¤ž»ÇÛ´i“˶ÐjµB;€ÑÛþô§?Åùóç@kŸ|ò‰0ž?úè#TUU IVûøÏÎÎv;ž+++ÑÔÔ±XŒ}ûö Ûdee!.. 8zôè˜ó ‘T*…T*ED€¾¾ÜÜ\¾ÉptèkoGLd$är¹ÓíLÈR@«©©L&–ì§$766º$*šššð³Ÿý V«ˆ‹‹CMM Þyç—ǵX,øÙÏ~†¦¦&¬\¹«V­BSS<ˆŸýìgøõ¯í”plE2™ Âs«ªªBdd$6oÞŒ¸¸8lÚ´ „L&êU«Ü&SŠŠŠ°mÛ6h4ìÛ·oR°$ì± xr‘O%Gq‡'Μ#8GxYss3Ü7ÙïîöààÁƒHIIAQQ"##QSSƒ ±±{öì™Ðs9vì4 d2™Ëãuvv¢¨¨Hض¾¾ï¼ó"##…/EªªªpàÀX,bÕªUl_xŒN8ÛÇsVVV®\‰ÎÎNTUUáwÞAQQ‘p_»ÊÊJ˜Íf¤§§°%o«…‹ŠŠ°uëVTVV";;{Üä¶? ’Èöج]&Ejâ<"Ž7"¢Y«ÀAøðC¤¦¦¢¤¤Äygˆ(PÙ¯q¬\ÉËËTUU¹lðàAX,ìÙ³k×®Evv6vìØ•+Wºl[UU…¦¦& ¸¸X»v-***`µZ¡Õj]î3wî\ìß¿k×®ÅÚµk…OÇJ;{¥›ý²»ÚÅ‹cÓ¦M<-ù®àyIˆMLa¿!âÁ9‚sÄ4°'d'š@´ï2™ {öìÁªU«„1”••%,vå){ºL&ÃþýûoÓ¦MÂb}vZ­‘‘‘¨¨¨ÀÚµk‘‘‘;v ==]¨zuü’cÕªUÂx;zô(ššš°iÓ&ìØ±ÙÙÙX»v-öïß™L­VëҚ྽Úr IDAT›o¾Áþýûñî»ï:UÎÛ‰Åb\ëQH‚ç%AžüˆKq¼Ñ=LÈRÀ²ØÙ,öDEJJŠÛ¾v HOOw©Zs<ØîäÉ“àrš¡X,FFF†Û•ÌGWψÅbÈd²I½¶Í›7#%%UUUc®¨NDœ#8GÐT›lïÕúúzaß]µmoËq¿ë566 IÔÑ7ºtSS:;;Ýn[\\|ßJrûórœ+ìc4//‹Åå¹gddÜ·:=;;[HF=z”;Ñ,–nýúõ¸|ý[=Ûƒ ‰lÖ¼n‹Å‚ššˆÅbÔÔÔ¸@ÙÔì‰{Âbt¢p_dï gï1çÈždéìì¼oуœ¢8ú´d"Μ#8Gø¾¢¢"œºÚ‡¯Z,~ùü/^,ôYˆ[·n¹OÛ¯›H²×^©{òäÉ1Ÿ}¬Ù÷è~¶öß}¿qÖÕÕ…ÈÈH· Vûœ0ú –””÷‡@l]ÀñFDDD4>&dœ\.Ç@˜Á×"gÕë¶WªX,8pÀí6555B²ÅÝAÚýÌ;Wè çȱ_œ·Šíý$<(TqŽàÁ9Âw)•J4 õ øf¯_>{¢Ñ]ŸeGn{±Nµääd·Ÿžž>e¿×b±x­³½uÁ;ï¼F3á>ºØãˆˆˆ²>úè#À‡~èö j÷îÝhhh@}}=²³³… ûÊË£ÄF‹ŒŒD¿ÛS•§ÓæÍ›ÑÐÐ ,üCDœ#8G7Ù“°Ž_X¸sìØ1aÑ<Çv“myànŒ¶ñèÖ ÑÙÙ‰††dee™ÀMII¹o/{낆†¶. 
"""š%ØC–NSS“°òX-öExìIÀVMsþüy—ÓÝ-î“‘‘‹Åâ¶7ãÖ­[ñüóÏOÙAçýØW‘æâ=Dœ#8G·ÅÅÅaåÊ•èììDyy¹Ûmêëë]’±ö mÇ1eg¯RŸH·=1lï×ìèØ±cxþùç…ç—‘‘ÈÈH—ö$öñ[^^>îx´!ãnÑ1ûc:.8Ùq‰ÊÊJîdDDDD³²pìGãØegg#22Òia{¥Ï¶mÛPSSƒÆÆFh4·IŒM›6Þyç§SŸ5 :;;‘——7éÓSRRpõêU·‹ u h>DÄ9‚sy[aa¡°hÜöíÛÑÐÐ€ÆÆF466¢¼¼ï¼ó"##±cǧý0==]ئ³³‹UUUøè£é²°Ýýökû‚XöqØZ%hµZDFF:-•——‡ÎÎN§mkjjPUUåv±¾óçÏ _¨äåå!22Z­V˜?ìÕØØ8nu­§ì­ ¦ë‹""ò/õõõ(++Ã\®•(0°eœššìV­Z…ªª*TUU¡°°xûí·Q^^޲²2¶S÷ìÙƒíÛ·;%O/^Œ·ß~»wïvzܼ¼îíU»Ÿ|ò âââ°gÏìÞ½eeeÂø·ç©úÂñu‘£žžF@ÃA˜¥€b±XðöÛo{ÔËmÓ¦MÈÊÊrÚ6;;ÙÙÙBõL\\œpyôŠÉömíUA)))X¼x±K•LJJ öìÙã¶zÆ]RfóæÍÈÊÊ­[·„ç¶jÕ*dddŒ»jóŽ;·íOúSáàÎñ`É~à6ÖTFFƸ¿s¼ç4ÖA«»Äû%Q<Ù& ?Œö¶¡§èáîOœ#8GpŽ˜vöýÊÓqeßÇÚ·ǧûo\\œÇí<Ùv¼1ù ãÙ“ùg"s”/²àNoÚzˆPvF#âx#"¢±0!Käp€XUU…ƒ"==‘‘‘¸zõ*´Z-d2Ù„!"ÎDDDDDD4;E[­[°r¹Üåv&dÜáÇq¹ù{‡ªX ±ŒACaa!,‹Ð7Î.==ÅÅÅ“^€‡ˆsç"»²²2ôÜÆà­„?žÏ€q¼QŠð’HÙ‹/bÎ’%.·3!àÚÚÚÐÖÒÀvZ ¯¸¸………Bï¸ÙzŠ/qŽ Îäö9ˆˆãˆˆˆf/&d‰Fñ÷þmDÄ9‚ˆˆˆˆˆˆ|»M&d‰ˆˆˆˆˆˆˆˆˆ¦ ²DDDDDDDDDDÓ„ Y"""""""""¢iÂE½ˆˆˆˆˆˆˆˆ|Tvv6 :5Ä0D Y""""""""%•J!•JÁPù]úÚÛ ¹\ît;²Dä×ÂÛ€'‰ñTrƒADœ#ˆfPD†°Ç6`í2)Rç1 DoDD³V'€ƒðá‡HMMEII‰ÓíLÈ‘_ ž—„ØÄ(•Rƒˆ8GÍ QH‚ç%AžyÜ\„ˆãˆˆÆÀE½ˆˆˆˆˆˆˆˆˆˆ¦ +dÜúõëqùú·8z¶ABD1Gô÷÷£££&“ ÝÝÝ|#}H||<ž~úib EEE8uµ_µX "Ž7"""š¥˜ pr¹aR_‹d0ˆÈïçˆþþ~\¼x­­­|ó|TGGƒ0¥R‰¦¡ßìe0ˆ8Þˆˆˆh–bB–ˆˆüµk×pæÌ‚ˆˆˆˆˆˆü²DDäóÑÔÔät]jÊÃP.N€â‘Ȥ˜;'œšA…%ZˆˆˆˆˆÈLÈ‘O»xñ¢S2vND¶nÈŲ¥‹"""" xõõõhhhÀ€Œ»ÿˆÈ¿1!KDD>ëÚµk¸té’ðsjÊÃx%ÿ9Hç‰""""šzzz`4I Q@`B–ˆüÚà™C8aÁÅ¿„¢¸¸˜ ÃÃÃhll~^ÿJ~´†!ÎD>ꎥ ÃÆã8܆Ԕ…X¿~=ƒBÄñFDDn0!KDþýa´· =½@Cp®\¹‚‘‘áç­ùÿÄ ç"fÀÞ6´õ¡A Ç Y""òIW®\.¿øƒlȤ ù¼r«a @.—»Ü΄l€;|ø0.7_Ã`ïB+$–1(DäósÄ7„êØ‡bÄXñÌR¾YÊÊÊÐskƒ·Fþx>BÄñFDDD(ÀK"d/¾ˆ9K–¸Ü΄l€kkkC[‹murëðBD~1Gtww —¥q/ ö9ˆˆãˆˆˆf/6›!×Br¯B®¯¯¡i1<<,\…„3 ³|Žèíí.+'0èÄ9‚ˆˆˆˆˆ縚! ÑD!Âå¡¡!„¦…c‘óY>G8&ß䉱 :qŽ šfýýý÷æÄ0 DDDDSˆ Y""òisæ„14»?¬ñÌšŽ YQDBDDD4•ŸñrÙ)æ% —M&BÓ±g(+q8GqŽ¸Ç±*Ý©R˜È‹œ²as"""¢©<®fÈe§U‰ãxê0‘·8&ö‚e© ç"ÎÂëU—ÓDÓ5æ‚bä ÑTW34š($¢èn?yÃðð°Ói¸<ðãAÄ9ÂáÚX&,dÖßß›7or§ ¯êèèÀÈȈí‡ð(‰e Ñ ÊÎÎFQQ6Èd8ˆã3>C@î„Ä)…Ë—.]b@È«¾þúká²(zÓé¹Ä9‚ˆs{¯*øâÅ‹Ü)È«®\¹"\žÿBD4äR)”J%ˆf8ˆü€VWÚÛÑÖÖæzLÍ‘;ÁÿFšOÀ:2ˆþþ~\»v .d`hÊõ÷÷£¹¹Yø94å{ºØcðä"1žJæ‚#œ#ˆsDàÎ!IOàö7تoܸùóçs¡)wãÆ §Ö¡IË=¾oD†°Ç6`í2)Rç1˜D^ÄñFDäÛ:€?Djj*JJJœçq†ˆÜ…D xJø¹±±‘}"É+ïíwÑ ì°`”'‚ç%!61J¥’ÁäAœ#vŽËüðRŽ9òªááa§1üðRˆ"<ÿ2CàyI'?¹œí‡ˆ¼þYÌÏÇ[jê½³?®ôÜñ¹çwý^·$ÎiD4õŸïKHÒ@¸íCøÈÈ>ÿüsüÑ”:wî:::„Ÿ'ZùFœ#ˆsÄlâøúûúúœgDSA¯× ýšE!áU<Ç ‘×8&9“Ÿ¾à¿¾î¶Ò~衇0wî\¾aD4¥˜ pëׯÇúW^CØcœVF÷„($a¾Àƒ?òŠk×®9†²({•o4ósDxæ:ÎÄ9bŠŠŠðkˆ°Ç6Lø¾¢ˆ(„¦Ýû»ÜÚÚÊ1GSF¯×;}’ºÂïû5?Èx#"ï‹.b¸ï†­>óÜ*ÏŽ—YKDÞÀ„l€“Ëå'?‚àyI“úPO‰ „9Â!AÔ××Ç9‚8GxQ˜b%‚b~¶ÒßßÏàЄܼyŸþ¹S2–_’Ñt™;w.~ô£ ?_é±b×ñ|zù®ôX§59{¥ÇŠº«wðnÝ0ê®Þëg»zõjVÈ‘wŽ£òèàïÑ0‡aãqá ùĉˆ…J¥bO×ðð0šššpñâE§ëƒ^ŠPÅs~J$ÙD#sçqŽ ÎÓ$ãÏgΜ9غu+–-[æ31ª¯¯GCÃÿeïþ£¢¼ï¼ÿ¿0*?TftMÂʯl1‘Üiºë´)»­MµÑî‰f›b5ßfµâï÷îÞ{zÔó='Ùû|ƒ!-§½SÏ©¸Ù$­9ZmŒ¹7óc¸Ó­ &]tHt`F£óýg2à 0 030ÏÇ9ášëó¹®ë}]3 /?ó¹ªÕ/éž[_&ùßÏ”£qÛülMûÊF]ÿä=Ϩi`¤Åùóç}>f gHÓŒ%_c>7^#†år¹GA'©Ã‡ó®7r·/ÕmsÉY\®®/þˆíîM›Ÿ¥™9ßP\B2Å1f³Y?ýéOU[[+›Í¦††µ¶¶ª¯¯/,ÛÏÎΖÙlÖüùó•ŸŸ/“ÉUõ±Ûí:{ö¬$‰wHÀyO 0ê?–’5óKɵäkºvößärØ<£á€!¯›Ù)!Ëüìqí×ù¯ê½†éúÏ·fhçΚ×ðs¯q ÉJ¸ïûºÑù}nû³n^n䂈¦ÍÏÒtó—Çõ??n^i×õ³oë·ŸÌTö’t­_¿žBdª>ßòóó•ŸŸÏT¦<YŒéÀø{¾'Iºqùœnt´èfOûÀƒW/ÀÄòµ1;Eº-^Óš6w‘nKÉš°9 o:l²;$;eç5¼FÄøkÄmsé¶¹‹äú¼_76Ýh?§›}Iò=‹|ÎÒ$IÓº-5[ÓŒæ yι®\{6‡”0ƒ{‰çLn²Ÿ?çgû¨&±ûÑú¤I,ñÁÿ‡"DPÜô~/” ÉìrifZšÌf³ßã²SÜoû[û¤ENÇ5ÍÈY¡i³S) €˜}ˆKþrÀåéw(cÑíZ³ªPþ~•ŒÆ9žÇ zJ§Þ;-W÷Ÿ'tߪÞ=­¢ï<¥Ýÿ¸Y{þqsX·ð)--•ýêu9¯~®øÿö÷àù¦ ’žˆ‹SêºuJ¼ë.¿Ç d§8›Í&[óÀMt\×û)^#$|í>¿e56¨êÝÓªxù¸jÞ{™‹Â}C<ß@ì"Äœª7^ô[æpô¨ð;O©öÃU¼|LÅ­rÝHî'`rcöo$ssÏV½{ZqÉ_VÙ/^Ñ⻿«Â‡žRáCOiñ=ßÕÞgå×ÞáèÑÆíÕÜEEžuç.* ¸n°Û>r¼JEßyʳýÅwWEßy*`غý'ûü¶½ý'û8Ù!LYCn\¨ó¹kùô ˈm>o¶Žî‚¢OúŒâ>1y^#\ýÝ©AÕ­Ñ«ù÷䎸îöŸìÓ†ï¯Òá—ÿ?IÒžg¥=ÏþJÃl•<ý}ÏzE«þ/Õ~Ø m?ú{­YU(I*ûÅ+Úóì¯ä’<£rGcãÓ{µliŽ¿òœŒ†9*ûÅ+:ú‡S*ùI©*þ×Ïzk¿¿KGŽWéáïxö©âåc*ûÅ+rtõèÀ/wOH_¼FL­:Ñ'}÷{dR<ßâ“5ýö¥Ã®ïêïÖ‹£8ú¤Ïp÷ÅÏ7@ï­(A …-ëtc”o¬¯òÞ¸¿Y§OúŒTŸ˜<¯.—Kqqqv¬ƒ§ pt]Ñ‘ãUªxùØ@ 
ú£‘ïÆ½ìîŸðóÈ«¥ÊXºZ{ÿy¿OøYûaƒvÿãfŸàµðûTøÐSÚûì¯Tò£¿—Ñ8gTûoHž­#¯<çiWøÀ}2š uôSžuªÞ=í c¼Zê³m÷¾íþÉ•‘~Ǹ×wðuÁkÄÔª}Òçp¿G&Ãó-Î6bàu³Ï1ªÑ'}†»Ïh~¾‚xoE ¦¶ùóçëܹs+[Ÿ9bû§þ8ºíÑ'}FªÏyóæñ„Ñ׈P>ôTÀåé‹n÷ :‡Sò´h[üØjí}öWªz÷´ ¸Ï3íšïø­»fUN½wZUïöŒœ Öšïúícþݹ>A³{ÛÞ£u½÷óà+ÇuäUo¼F|!--M­­­¼æÓç”ï3)))âÏ·yóæ©££ÃoyVj¢vp< Ÿk_MðÛ¢OúŒdŸf³™7µSœÅbQNNŽÚöí“‘rSì·nÝ:åääÈn·‡Ô~ÕªUã¾OôIŸãÝgRR’òóóyÂO¡×«Õðèñ²Ûk´ªÑ0[ùwçüÄTn‹î°ìvI’£«çÖ¿W$I÷~í±!û©ýèì¨Ù`ãÚÎJ’оóÔë¸÷/žï±ò±sçNÕÖÖò{™>§tŸ&“)*žs»víRmm­z{{}–›L¦ÛΟ?T5¢OúŒTŸÑò|ÃÄ2™L2™LJ À¤Ñ/©]Rwk«Œ³fùýçì—””$‹ÅrûÕ«Wû>Ñ'}FsŸ¼FDÇymhh˜Ð@6”y[ƒábÛ=Ìö ¿vß„žãmÃL‰0QÛ庈•×~/Ó'}†7ÀX±bEÈmÇûxè“>£¹OÀøj“ô²$½öš²³³µk×.ŸÇ dA ðµ¹å‚¤/FÊ ³%IÅß_å7W«Ã1Ð~´óÇ+ÿîº5‚{ÞXïm;ºz&dþXÀð¦QFï…_¾êó³ÃÑ£ƒ¯Wú¢Û=S¸§"(ûÅ«~í7>½Ws©êÝÓ²îvï?ÿÊï±²_¾ªÅwW/ãD@˜1B€T½{Z´WÛ~ô¨ºº¯¨ä'ûäpôèÀ/v{ÖYóB•Ýýª^øå«Š‹‹ÓšU7÷ªxù¸Ž¯RÁ×îó½:^Ö¬*TÁ×îSÕ»§µöû»TòôßË<[GÿpJ{Ÿý•–Ý£âÇø¸#„,!xþŸwhϳ¿òŒ25fëÀ/wûÜ Ëhœ£ª?¼¨5ßߥ²_¼¢²_¼âylÃ÷W©ìŸwNè>yå9•ü¤T_€Ý ¾vŸŽ¼ò'"€@3\Ýu›ª7^ ¸<ÿî\9lUž)òïÎ 8¬Ñ8GUo¼(‡£GµrÝÂîóÛ¿ÁÛ´Îpûi4ÎQÅÿÚ£²ÞéÙvÆ¢Û™;aÕÛÛ«?þñÊÎÎVnn.@Ì#` ‚rÀhœ3aÓDó¶Ûz{{UZZªÖÖVIÒ† d±X( b,0J6›Mo¿ý¶ìv{È}$%%éÁd¤`ÊÆJÒÁƒ%‰P€Q°Z­ª®®V¿¤{n}˜ÜdQ:vì˜Îœ93æ~._¾¬Ÿþô§0å cÝ&c(ÛÝÝ­äädN, "ìv»Îž˜~jå¦Y „?2ÇC ?RD¿áæq8Œ½ãþ9êhìSçç’&W({ñâEuwwëý÷ßW[[›Š‹‹9ÉY` âçŽîiÔôV'…LIÂØ´¯&knf¢Œ™ úädç¤ e+**Ô××§+VH’úûûU]]-I„²Y` âçN×ì…3) æ ÆJÒm3§iÉ·æNŠP¶¢¢BÕÕÕZ¹r¥gY^^ž$éÍ7ß”D( €¡%H2»\š™–&³Ùì÷8,Æd¤0Öm2„²Þa¬;„u#”@0Hz".N©ëÖ)ñ®»üŸF‰ª`ÃX7w(›à5åÏÁƒeµZ#~,Ã…±nyyyZ¹r¥jjjd·Û¹0jŒ@HFƺEãHÙÞÞ^}òÉ'ƱnyyyÊÌÌÔõë×¹0j²µPÃX·h e»»»õøã½~BB‚º»»%I .ä‚@И²£2Ö0Ö-Z¦/¸xñ¢'\­îîn]¼xQ \ ,‚6^a¬[$CÙãÇ«¹¹9ä0Ö­µµUûöíSEEFD € Œwë‰P¶¢¢BÇŽÓÇ<æ¾’““µråJUWWÊ`DÌ! €MTëÎ9e+**T]]­•+W*++k\útßìÍ7ß”$sÑÆ…ÅbQNNŽÚöí“‘rS,†5Ña¬[8BYï0Ö¢ŽïP611Qëׯç⌙Éd’ÉdR¥&~Ií’º[[eœ5Kf³ÙçqY )P;mFœ:ûÔÙØçY–0o†î¸Ψû¿rñšìÿÙ«×nz-uù¬3^¡¬Õj°0ÖÍÝïâÅ‹¹xbT›¤—%éµ×”­]»vùdÀ˜œýä‚çûÜÜ\ 2¹o&ÈxŽŠžèëg¨›lõõõÉf³©o³Ùì77È4JBõö»u:{þ‹@6??Ÿ¢À0d@HìWtìäŸ=?/_¾|ÈQ”ÀTeµZURRB!4¦,!©øM¥úú¯IøØúêÕ«) &¥òòreee)//b¢ŽÕjUuuµú%Ýsë ÀäF FÅö™]¯VªõB‡gÙºuë‹I«±±Q)))•ìv»Îž=+IZD9€)@íøÿþŸi $éÁ”Åb¡8Y@Tký¬CqŠ£ÒÛçTë…54~*{ÇÙ;{|_µjSÀ(È¢Ú¡£VŠ…U\\¬üü|Š^$™].ÍLK“Ùlö{œ@-11Q+V¬ÐŠÑÓƒ IDAT+”””DAó,‹Ö¯_O!à±@ÒqqJ]·N‰wÝå÷8, ê¬^½ZÇŽ£Q"))If³Y999ÊÍÍ¥ ˜r¶lÙ"§ÓI!²€¨“››Kð l²²²d³Ù(Âb%€ð Bd³ÙtòäI € È ¦566ª»»;¤¶6›M'Nœ ˆ,bZyy¹êêê(‚›zed·Ûe±X|–[­Vuttø­¿|ùr™L& 0Y,åää¨mß>)0%Èp‹Ýn×Ûo¿=¦»­›L&-_¾\¹¹¹!÷±oß>-Z´Hf³Ùgyee¥þò—¿ø­o4•žž.IjkkÓÛo¿­ïÿû~í0ù˜L&™L&%P `Òè—Ô.©»µUÆY³üþ6#à–cÇŽ©ººzÌýœ={VÏ<ó̘ú¸óÎ;Õ××ç³ì‘Gr}÷º]]]úä“OÔÛÛË €h“ô²$½öš²³³µk×.ŸÇ d¸åòåËãÒÝn§˜@Œ0›ÍJNN¦,ÜqÿÅÏݯɦ·:)0 effÊ`0„ÔÖl6ûÍ÷ ‡@€âçN×ì…3)¶nÝ:¦¹£€Ñ  ÊlÙ²EN§“BÀ4]²²²Bž“2%%E›6mò»‹' :È0…$$$hÉ’%JJJ¢@X­V•””P) ÓÊËË•••¥¼¼<Šˆ:V«UÕÕÕê—tÏ­/“#dˆ2êîî¦@Ÿs]]]•ìv»Îž=«¿HrP`J  Ê”——«®®.¤¶ýýýjjjRoo/…€(D ÀréÒ%íß¿_6›b@bY' ’Ì.—f¦¥Él6û=N „Èb±hýúõ $=§Ôuë”x×]~È ¦mÙ²EN§“B ,˜C€(“™™)ƒÁ@!€0ÉÊÊRrr2…@X0B€(³uëÖoʯŒŒ %%%QHˆBŒ` IMMÕæÍ›N`üÙl6SOOçgþB?²D™­[·*///¤¶ñññÊÈÈPRR…À˜Ýãõ}}}½š››£b¿š››U__p?'“’’].IROOª««£b¿œN§N:åùùË·öãƒ@€)$55U›7o8q<€ñg³ÙtòäI ˜²ÒåƽõÖ[²Z­Ý'«Õª·ÞzËóó—].Ÿ‘¦“I‚¤Õ^Óœ={VÇŽÓ•+W"¶OÍÍÍzõÕW=£c“].}Ýk1vÜÔ 1­±±QN§3¤i l6›ª««µfÍ ˜²¾§³.—ºo…ruuuª««Óœ9s4{öì°íÇ•+W|>B/M°ÐzÿùÖq\¸pA¯¼òŠfΜ)“ÉÖ}¹pá‚ß²Õqq>7ÃØÈ ¦•——kùòå²X,€$mŠ‹Ó1—Kç¼ÂÏžž¿€4œ²]®)~3.Ns%½#É=‹ìµkפá’|«¾é<Æ,Q¦¼¼\YYY!Ï# €©Ãb±(''GmûöÉÁýHôH\œÎJúÀå’-‚£RÍ.—þ:.N9Sìcô÷KÊ‘ô–Ë¥6É3"9ÜR%-º5ò˜‘±ƒ@€(ÓØØ¨”””Ú¶··ëõ×_×c=Æ<²S€Éd’ÉdŠš`,Gò¡]’aܶQ’A’¦ð|¦ ß’Ô/©-ÌÛ÷Œ†eÎØ1é—Ô.©»µUÆY³üþ6#` q:jjjRoo/ÅÀ„2ÜúÂÄH˜.`’j“ô²$½öš²³³µk×.ŸÇ d€™Íæn@,r¨%dD¬#@LËÌÌ”ÁÚø³ÙÌÍÀ‡’Žßúþî[7‹b,bÚÖ­[e³Ù(Ä;Œ•¤ââ$BYÄ0Y¢Ì–-[ät:)||(©{ëß­‘çxè>Û$5X磸8µ»\Ê‹‹Šý¤OúÏ>;]®aoŒF @”ÉÊÊ y´^JJŠ6mÚäwOL~g\.ÙF1ªÔ¬‘ƒ¤pö™0wºçMWçù~IR[\œ.º\2ÇÅEÕ~Ò'}޹Ïú'` IHHÐwÞ©¤¤$Š„ÕjUuuµ^|ñEŠ˜Pé/¾¨„瞓ΠºÍ‚;”ž›;üûÇ0õ™0wº–|k®n›9M’<¡l\\œj¾ô%}}Û¶¨ØOú¤Ïñî3Ð`YÄ´òòreee)//b¢šÅbQîÁ·ùóçG¤ÏÔÔTó §‡±i_oèe?þøcUTT¨¸¸xÒ;}Ò§7“ɤüü|¿å²D™ÆÆF9N%''S LϹ”” ˆz‹%êû´Z­zÿý÷=?c݇²ÕÕÕ’4d(;Ž>é3XÓx9 
º”——«®®.¤¶ýýýjjjRoo/…@XY­VÖÐÐ0nlnn®V¯^MÁ1eÈ!2›Í!Ý ±‹@1-33Sƒ!¤¶f³™›€°²X,jkk£“Ø4J€X¶uëVn¢& ‹Å2¦÷./½ô’¬V+…Œ FÈe¶lÙ"§ÓI!Œ»––Ùív AŒ Êdee…<'eJJŠ6mÚÄ]S JÈ0…$$$hÉ’%JJJ¢@X­V•””P@1­¼¼\õõõL Ï=÷œ~ó›ßPˆIŒ@€(ÓØØ¨îîn „ñ9×ÕÕE!²D™òòrÕÕÕ…Ô¶¿¿_MMMêíí¥ü<ñIJX,"‚d˜B.]º¤ýû÷Ëf³Q ~ÒÓÓe2™(DÈ@˜L§@h,‹Ö¯_O!4YÄ´-[¶ÈétR0)ìÚµKgÏž ¹}KK‹’’’˜¶ ‚˜²€(“™™)ƒÁ@!€0ÉÊÊRrr2…1ᥗ^’Õj¥ÄY¢ÌÖ­[C¾)W||¼222”””D! 1B€)$55U›7o–Ùl¦@Øl6¢“,Q¦¼¼œÿñ0!-Z$“ÉD!"ˆ@€(ÓØØ¨®®®Ú¶··kÿþý!ß ÀÔöä“OÊb±Pˆ"` q:jjjRoo/Å€(4¡1›ÍJNN¦,bZff¦ CHmÍf3ùae±XÔÖÖF!&1¦,@LÛºu«òòò(˜,˘޻¼ôÒK²Z­2‚bb„loo¯ª««Õ××7¡ÛéèèÐñãÇ>–––¦üü|®8Àˆ¶lÙ"§ÓI!Œ»––Ùív A1È–––ªµµu·c·ÛuìØ±!ß°aiŒ(++K6›-¤¶)))Ú´i“Ìf3…€(S{§é„¹£Ï§o›| 'z„. Z²d‰’’’(V«U%%%A‹‰²O?ý´JKK}Ñ9iñJœç{øó²GÝwâ¼éJûj²®õÜðY~ù?{uóºËóóòå˵bÅ ®8€(S^^®¬¬,æ‘“ÂsÏ=§ÞÞ^=úè£c’Љ²f³Y;wîTbâëÕ¶kJ^” ù³=_3fÝRÿs3}ú¹võ†_[\\ÌÕJcc£º»»)Æç\WW…@XL‹•ÊÞ¼îÒ'';Ô×ñù¸nÇö~—çû=?ÆF«¼¼\uuu!µíïïWSSSÐÓõˆ-O<ñ÷8аi±t°ÊÆ"íÒ¥KÚ¿È70µ¥§§Ëd2Qˆšk¾uëÖáøøxedd())‰Qh%ýô„±0õóI‰ÁfÌŠü¯ÕÔÔTmÞ¼Yf³™“„ÍfÓÉ“')‚F {K°¡,a,L^«W¯ÖòåË}ΈSê=³|¾²V™tÛÌÑÿŠÌ\9OwÜ?ǧ¯„¹¾ÁnZZ¿7€(ÓØØ¨îîîÚÚl68q‚"€°±Z­ª¯¯§“ØtJð…‘¦/ Œv»}È ®‡š{2gÏž ¸Üd2 ;¿$€Ð¹_·«««% üܵ«7dþêØçl1ë6™þê‹©:Ï÷©¿ó‹ÿØKKKÓÎ;™Nˆ2åååZ¾|¹, ÅQÏjµª··Wyyyc’"d¨Pvö™ê¶9=ëÆNí¶ƒ†e[¥¥¥C>ÆÛÀÄʺÿ³m>ÝÕÚ¾´··kÿþýÜ1@@O>ù$Ÿ Š0Ù! eÝc§¾Õ«Wû÷³¦))u†ÏWê=³4cÖèÙ´¯&ûõ5øF@‰‰‰Z¿~='˜`ÊŽ5Œ+§Ó©¦¦&õöör‚ 1eÁ0O_@›ç]’n\s)½(9¤»®673Qs3¿|û:>×'';ËkkkUTT$Iª¬¬ô9?Ñ,ØýÌÈÈPqq±¶mÛ6áaóhCYÂØÉ¥¿sà|».¦¾Á¡ìD‡±ÒÀh½åË—Ëb±ŒþîïWSS“± ÀÏO<¡¿ú«¿¢4=Ù¸q£jjj"z 7nTEE…*++ýÙá”””xÂØôôtOÛÑô.µµµZ»v­š››ýÙ©®¹¹Y{öìQEE…jjj¢&”%Œ|n^wùn€äÊJÑ;MÁ¥K—tèÐ!íØ±C¹¹¹œ<„Õ…ïÖm3™ 𥧧Ëd2Qˆ K [[[«={öhÏž=;ÐŠŠŠ!sÞ 4Š÷èÑ£ž‹µ¶¶Ö'è®]$9rÄgZoF£Ñ3 Äd™®ÀÛ²eËTVV6äõuäÈ:uJÍÍÍZ»v­*++'|ŸF e c'ÜÜ\%&&ª¯¯/¤ö‰‰‰_1À;”eÎXà ÞσþÎ’nPbÜÍë7uµíÚ¨Ú\»Ê{ÄŽéáÚÐÞ½{µfÍš¨œku¨ O˜Àý‡øà s¸vÑ&???j¦U…Ñhrú‚ÂÂB•””¨°°P§NRUU•jkkÃr­ ÊÞñ•9úìO=„±“HYY™BjK;Ü7ú2›Í„±€×{ ÷<õc1oÞ¼Iyü‹EëׯçBÀKç }r²“BC˜ð@¶  Àsc¦h˜ºS—{.`i`¾àp…ÿBYﻯÆN«à:FÏb±(77W—/_ž´Ï­-[¶Èétr2ƒñ°Àà‡áíÚµKgÏž ¹}KK‹’’’˜¶ ‚&<-,,T~~¾^xá…q™º ¹¹ÙÓ—{ ŒŒ ­Y³F6lðÅê¾I˜ÛÁƒ=¡{žÕŠŠ µ´´(==]ÅÅÅjnnÖÁƒ}Ú:uÊÓWAA ýÚrôèQUTTÈápxFmfddh÷îÝCÎCëp8tðàAUUUÉápøÜŒ,??_Û¶mói[UU¥S§NyŽËû¸Ýûê}L6l¸íÑÖÖ{ÛÞµÛ»w¯š››UUUå9ÿƒ÷y" 7ƒ{?½Ï{ ƒëL}ÜÇçʺÆEff¦ …À¤a2™&õú¬¬,Ùl6N$c`6›µ|ùò1ÿ'mRRRT~ºz*y饗´jÕ*­^½šbDHœËårMHÇqq’¯’’åçç«¥¥E’TSSðɵgÏO h·öîÝ;l˜k4UYYéÓ·{?qoÃýQ÷‚‚UUU©ªªJEEEC¶Û½{·öìÙã×ΛÃáÐÚµk‡&àùçŸWII‰Ï²ŠŠ mß¾Ý3UÂPÇyàÀ­Y³Æ¯nCí«÷1UVVú…#m×h4êðáÃ~íÜÛ.((Pqq±6nÜ8ä~>|سϣ½ŽÕx°ŠŠ Ïö_c#][®[ïk­ªªJk×®ö¼8p@+V¬ð„²„±BÕ××r8ÔÞÞ®wß}W?þ8¯?@žsî?@)$›±Œ---%°°Ü×h4úÜTk¸Ðn(Þ#kÓÓÓuàÀUVVª²²R»wï–Á`ÃáPQQ‘jkk=íÜë¸=ÿüó~ËËÏÏ÷[gÆ žeC†õVTTä ½Û>|XË–-“$mß¾]GŽñ´©­­ÕÆåp8üޱ²²R6l4öz×°¸¸Øçqïãf_ÝA¦{»Þ5\Û¡BÑ3gÎhãÆ2 Ú¶m›>ì·OîmL„ªª*mß¾]Ò@x;žÿ›æ×‡–-[æs^8 ôôtÏñ?^;wîÔªU«cDDjjª6oÞÌë&6›M'Ož¤ZØnêUXX¨mÛ¶…4uûcð’´lÙ2UUUù|<½°°PkÖ¬Qaa¡'úˆBLba dCºÀ}3¦eË– {c(ï0l¼GI«¹¹Ùž®Y³F.—KÍÍÍžc:räˆjkk‡ <'‚»NéééÃŽªõU‡ »‡j?7ó2 *((øå¾ñ{Z…P¦ÄŽ÷ôEEE:zô¨_H[\\ì™+ƪ¼¼\õõõÀ¸[´hѤ¾!ëTpÊ‚·ß~;èÑ«#©®®Vii©Ï²x@ï¾û®jkkõ­o}Kßüæ7=ëºy·qïKrr²__ƒÍ;W:~üxÀ:tH§OŸöYÖÚÚêùw¨þG vçÏŸ÷<ÞÞÞ>âþçüùóêëëÓgŸ}¦ÎÎN}úé§úì³Ï†<–¡ê7x¿¼ÛýéO¸¦Oq_333uþüy½÷Þ{žu½·ù»ßý.¤ë!óçÏr²éÕ«W«££CGU}}½***TWW§G}4¨Ú³ŸwÜq‡>ûì3ÕÖÖz‚öÌÌL-]ºTK–,ÑwÞÉ« €qÓØØ¨”””Ú¶··«¢¢B³gÏV||<ÅÂd,ïùFÃf³)P-**RuuµOVËòóóµbÅŠ°nÓ/µÛí:tèиmÀn·ûÝùmÉ’%ª©©Ñ•+WôÖ[oiæÌà*ª‘ IDAT™ºãŽ;d·Û=ëº[\ooïˆw‘KHHv]›Í¦7nøõ;RÿŽ#P;ïÀ4жFòÙgŸ©®®nÈöÏœ9S×®] Øÿpõj¿úúú$ Œ^©¶Žw¤sLƒ̹ÿêW¿ª‹/Ên·ëÏþ³²³³5gΜqÙÏ¿ýÛ¿UuuµÏ²óçÏ{‚î9sæè¾ûîSNN¯d"ÊétÊn·û¼î˜xc¹Ó1Àh¸\®1µïëëã½Ë ÷q999a½ß”—/_žðÆÇÇû|¼ýÔ©Sr:ãÒ·;¬œŒtüøqO;sæLÝ~ûíZºt©–/_®ï}ï{žÑÄã­§§gJ<‰¼§GÏ0Â}ÍnذAJOO×Ì™3}êWUU%«ÕÊ+1Äl6‡t30D÷ Äp™>܃ s§ëöûç„Öó¯þ1f&hñ7çú=¼XsÕ麨÷ÿðgõôô¨ñr½Œ™ Ò­Oàû´¹Õ—kŽ3`_Þì¿á–ü·;öqûýs´8Ï·„÷§K¤„yÓýûæ8µsÕ÷HÇoµùÒ -þÊÐûûYS›îX¼@’Ôwµ_ÿòòÿè7)^Oü÷ïiIÞ"¿6ŸÔÿÅÓÿàc9ï¢~ƒöË»ñp²—ºu}F߈µí?2pqzïpÛÍõ0R»€ç&ÐȸÈ3à ãÕQíçgMmžï‡ÛÏ»´PÒßxÚüGUNW~¤þ^§êêêôÐÓ_ÓÜT¯fBÖôVç˜ûXøåÙJœ7ƒbA¸tºß3'ýh™ÍfÝõ4]ºrB€°èùÌ©»Óî¹}ÂÜé£Ëf¦¨ 
ÿÞ£þÎÏ#²íaÙi3â4{áÌ1m`æìÛ†ìã»[ŠôŸ§Ï©ãb—Þÿß•}oºç1ï6Y÷¦«±¦EÍ ­ÃîO빋žïdÎ ¸nâ¼é~Ëo›9ÍóïPý:Ž@íîY˜%í¾µ?MŸê¯¿»4`ö ýüÿ®$}ïÇß”i¡AýWF ÿ]É·tÏŠ¬€ízÿãêÇÛò©Ïz3gß°~’”xazÀä~9CœøPÿrIÓfÝTÒœ„!÷·³½K’”þ¥Û=í‡Ûæh¯‡á wn|®¦O=ßç=°dTûÙöjÀýWÓp½¿þö@ÖqÁ¡ÿóÆ™!û;qàÏ÷÷<p¡¶1ž²n˾wV½=ý×ùàÄŸ}µ_ìòülº}è:qfœ÷5C’Ô×Óï³OƒUúà‹ó1(”Œ­ç.êƒ7?ôü|gÖÂ!× xN¼Úz{ã×§ôƯßñ¹¾ü®åÙÜ8ÀøYö›Àx»øi›®õÜ 4-Ò;}o† ùʰëÜó@®æ-)_ÿù[:WÓì·Î¿>ó{}øNƒ$鯿}L·öõicÛ„ÓC?øº¤ùågïÊž«iÖ‰_„{Y÷¦Ët»QiÙ {{úõ¯Ïü>è@y¨Ðq°¿yh™îÌØöë?{+`à]õÚž}ºû\¥e/ ûuÒ¥_çjZ†üúÝÏÞÒÏ~ü’únÕúÛ?øºÏh_ï€þågüÏɉ_Ÿò\?ƒyÿ‡À¿Þ;øÜ¸ÃÚÄÙñCÁ,ãí³Cž“2%%E_ûî}Jœ7B@ŠŠ¿Ö¾ýƒ}ønƒ:¼F‰zKš“ >»N?Ûú/êëé×϶¾¤´ìºç\õ^q´½à$Ý™µ@ßûñ·üú˜·Ð Ž‹]úðmýÚÿ+Iúù{?ãq‡ÌU¯ýI¾Ó s5?×=ä*qv‚kšÕzn Nœ¯ÇÿÇw=mÜS3|øNƒ~¶õ_þOߨí§^~æ÷:õÚž‘³ƒkëÞßpk=צŸmý— ÖýëoßãwÌiÙ =õm=צ=ë~®´[Atkc›úzúugÖ%ÎIPã ÐûoZ¦NœQcM‹>xãŒÎÕ´(-{Ò²è\M‹§½$}ïÇßrÚ‡„„Í¿cžºz[)V«UÕÕgt÷“ (‚2-vbðÔ¤e/Ôþ¤g:€Ösmzã×ï¨êÐê¸àPâìx>òý¤bsÀ@ìï~üM%úXy°£HCñwÛ¾¥>óˆgÇLðÆUúÀÆfÝ›®ÿ~`³ÏHÞ>³Î3Zõ\M‹Þøõ;žc”Ë¥oÿàëÚöó'¿Kß;ë³Í¿yh™§>nÁŒ¨MË^¨=¯ýXwßEÚz®MU‡>ðÔV’ ùŠ~üó'£:l¼û\=ö?¾«Çÿéá€ÿð™užcìëùbÄm_O¿î~ W?þù“CöýÃgÖ鯿}¤‘²¾Ó 7~ýާý¼…ýð™Güæ˜ѯöª¯¯§`R8ÿf‡~ó›ßPˆI,Îår¹¼444hß¾}’¤¤ÔÊ\9/¤ŽÝÓ Ì[hrú€ÁZÏ]Tß•‘†Ù·BÇ@ìúðݯuÓugÖÂÃÂÞž~}ÚxQ}=ýJœ“àiãÞnâì¿ãwõóöá;ÿ¥ÖF÷¨Øeß›>ìú­ç.}^Çç]û‡:.ºG­úw빋!õœϾ¹}¨6Þ}Ÿ«iñôŸ–µ@Y÷f »¾{ÝáÎY(׃w»ás~g'èžr=ûä>§Cí§ý‚CŸž»è9ŸÎŒÕ4ûl}åŽCž¶àüÕu¾÷C é£iÓòåËe±XFÝv`„l5#d@Øœ³Csg¤êÑG ©}ii©Rù³c¾Ž½í×%I;vìPnnnض=a,ÍX¡þþ~±} Ž¤Ý6sÅ&ø9G Âm¬lý'ªyZf̺-æë©@–¿Ô˜B.]º¤÷~ÿgõu|N1ø¹=mȦ±‘F a2¡±X,š™×«Žk)‚B €˜¶ì;™ZzÇR &…Ì•óôÍ”GCnñÓ6]»Á´Ä”DÃÂY2 ãí³•œœL!@L8ñúÿVǹ> AŒ Êä¯ÊRž!/¤¶ñññ2ÝnÔm3ù?WˆFüµÀ’ššª¾_‰óø?W l6›ZjÚ(‚F €˜æ¸pEÝÝÝ!µµÙlj>Í ½@øtžïS}}=…˜ÄdÓÎüá¼êêê(˜:Îõé£>¢“ذŸgìïü\Ÿœì J„Qã•.%>p»òòB›G¶ó|Ÿ>ùßß@8ñž„Kçu)5ôö)©)êjîÐÕ¶k1^ÇÏ#¶íaÙ›×]ºÚv+€°º®®®®Z¶··«æƒuµ›ßß@8ñž„‹ËåSû¢Â":tHÎî3Bü¦,ÈÍÍUbb"•`r:!Ï… ±&11Qf³9¬Û 8B¶¬¬L œ"`ß¾}cîã‘G û› ŸsîçÙòåË)$‹ßþö·cjŸ––¦;vPÈ[rssþÍéÑ´3`|˜Íf~—AÊÌÌ”Á`ù¹f±Xø„›o|ãjkk ¹}RRƒ7"l:%@,Ûºu«l6…QÁn·«ººÚoùòåËe2™d±XtöìÙû饗tÇwhþüùžeóæÍ“Åb¡øaB @”Ù²e‹œN'…bпýÛ¿éü£ßr£Ñ¨ôôô1÷ßÒÒ¢––Ÿe‹-òŒšMMMU||<'bŹÆzk60®úúúB­×ßß/§Ó©¼¼<%%%QL`‚ŸsÒÀ´LYÆKoo¯ìv»f̘Ööž8qBwß}·|ðAN›F ˜:´dÉÂX L¬V«JJJ(7î9^.\¨äää°lóĉúøã•À ¦, Bìv»ìv»ßòY³fKÿCÍ+•““Cñ/åååÊÊÊR^^ÅQeáÂ…’¤ÖÖÖ gÝaì† ˜G6Ldˆ€ÎÎÎ!ç†*,,Ô}÷Ýrß½½½’¤ÒÒÒ€ÿò—¿Ô´i|HpkllTJJ …Qéâŋڿ¿V®\9îÿL²D@RR’²³³}îlêf0ÆÔw__Ÿúúú´nÝ:¿ÇæÍ›G L"999JKKÓ›o¾)IãÊvwwë“O>!ŒY" >>^K–,Qyy¹ •šš:îÛpß%U¸ÙWFFFØæ 0>’’’´sçN•––Žk(›““£gŸ}–ûODCdˆžž]¾|Y‡R{{û„mçĉzýõ× c€IÊʺGÊÖ×ש?÷ Ãc#ƒ@€1›ÍÚ¹s§¦M›6a¡¬{N¨o~󛘋EeeeL8w(›­Å‹‡ÔG}}½'ŒEäĹ\.e rl6›JKKuóæM­[·nܦ/`‚~ 8}ô‘œNgȘ˜Íf%&&RHV/^Tww÷¨ÿ>رc‡rss)`1B€ó);Ö ~³E Œ,++‹Q"`ÒÍHWï¿c#²D »Ý®Ù³gËf³éæÍ›cꫪªJK—.%Œ‚Ð××'›Ír{FÈ€Húå/©… y£/kDŸé”€è`2™$ „;c e.\¨Í›7SP l6›>þøc­Y³†b€°ëííÕåË—U[[+I~¡¬Õj%ŒBŒ 9N>}Z7oÞÕœ²LÐŒÞXæµZ­ª®®Ö‹/¾H!@Dôööª´´T­­­Z¹r¥O(k0táÂåççS¨(2a#dO:R;ƒÁÀE2ŽõOOOWFFFÈý477ëèÑ£r8žeF£Q?üð˜ú />>^¿ÿýïÕÛÛÔ¾ª««µråJÂX åååZ¾|9£FÀ¤”””¤;wª´´To¾ù¦¤‘²îÁ , HQfÂÙÂÂÂÚ¨ªªjÜöãàÁƒZ¶lYÌ…¼îúïÞ½[{öìuûªª*íÝ»wÈsQRR¢üü|=ÿüó!Ÿë‰ÐÜܬ#Gލ¤¤„g7€IïþáTZZªC‡ ʺç„Z¶lo¶€äÊÖÔÔhÅŠ ÖˆbÓ¦ê9©¸¸Øgt'FVQQ¡¢¢"Ÿ0¶  @JOO÷,«­­UQQ‘***¢b¿ËÊÊtï½÷êÈ‘#œDS‚ÙlÖÎ;5mÚ4:tHííí~ëxOÐÏ'L€Øåeò“ŸÆF¹ ¿©×²eËTVVôúF£q\¶[[[;®#mc…ÃáÐöíÛ% LQVV¦ââb¿uÊÊÊTVV¦®®.mܸQùùùŽ9Bø`Êq‡²î‘²›6mRBB‚$î– Œ§þþ~¿eíííºvíšßòùóç{ž‡Ñ$))‰"LÈƨúH;†WQQá 5+**Þ1Øh4jÏž=ÊÈÈÐÆ% ŒN–‘²0Õ¸CÙ¦¦&%%%éæÍ›ª¯¯'ŒÆQgg§L&“ϲßýîwjnnö[wÓ¦MºóÎ;% ÌÏ6^ ¦Sxs*6 ÃXoÅÅÅ*++Ó™3gtôèQŠÈl6Ël6ËétÊf³iÅŠúÒ—¾¤ÜÜ\ŠŒÑŽ;”””äÈ>þøãêíí ø|t>1™Lºÿþû)"‚ÕsÈ–””¨¨¨Èo>Soî¹bÝóÅJRQQ‘çc÷’´}ûv¼Ñ“ÃáÐ /¼ ¢¢"ÅÅÅ)..N÷Þ{¯¶oßpD„ôÅ«îþöîݫŋkîܹZ»v­g_ÝûU[[p;EEE:xðà°5p·[»v­/^ì³7n÷iÜ£c»ºº‚Z¿¸¸XÛ¶mÓ¶mÛ<ËÊÊÊ<Ç>T ŸãÁçæÈ‘#Z»v­æÎëS¯^xÁoZwgΜ‘$9sƧöƒUUUùÔÓ}Þ†;ƒûó>—ƒÏ»»ŽÛ·o×½÷ÞëYgãÆCÖ#Ðõ±xñâ÷ @ì‰×’%K”œœL Œ“ÜÜ\™Íf¿åf³Y¹¹¹~_|câš ’\’\!÷QSSãé'##ÃÕÙÙé·Naa¡gÊÊJŸmþ¼/555.£Ñ8äú’\ðÛæîÝ»=ý=ÿüóC¶qÿ|øðaW~~þÛÈÏÏòøGÚ¿¡öÑýØîÝ»GUsïã)..óy{þùç‡\¯³³3àzÅÅÅïÑhtÕÔÔxÖ/((r]÷5áÞÞš5k†í»°°0àuì¹<|ø°«¦¦Æ•‘‘Ô¾»\.WSSÓë{_#ö 
“KT².—o@¸mÛ¶!ó+++}{þùç]•••>A˜whh0<ë455¹>ìZ¶l™OÈæÍÈfddxÓ‚‚WAAË`0x‚3ïN’ëá‡v>|ØUYYé:pà€+==}È಩©ÉÓÎ`0¸8à9†ÊÊJÏ>¸¿šššÖ´lgg§Ë`0øá{öìñ Gâ>¶¡ÂæÁçÏ]³x–=üðîÊÊJWgg§ç˜Ýû–‘‘ás.+++=çlÙ²e®ÊÊJO[7ïð¾  Às¾++ÿÿöî&6ªêÿãø5 Mf:Zt3jä¡£&ˆm S]š–&ÒQ·´¡†¡,Ä-jšLc\´ÓqÓ΄®Ì”'dF…S„ ˆ¡½5”Âü¿ÿ=ÿ;Ó™ét*üÞ¯¤qz>÷Üsn]|9sî±T H+Êæzž³¥s,ÊÊÊLquûöí¦µµµæ˜@ –m·Ë9Îö=;Ï›éXàßgÎ ²vAoº?Ù8g@Ú3ÕlEßcÇŽe%isÁ²oݺe |'­°ç,Àå:ßÙ¹f±:‹Ÿ™÷°}ûvsn®|gA33ÿ~ŠxgΜI+Ê:‹‘UUU©ýû÷OY u¶-³Xl³gšÖÖÖNë\…|gnæ¸æ;×YèÍ,ìg;&WæêSçxå3»}Çl»téRÞgÄyž³ €‡Óœdgú“Í¥K—&ÍŒ´‹yÅÅÅY ~ù ²ÓýJ½3ÃY,sds÷œ}ðÊ+¯ä<Æ9ÒéóÏ?O½ýöÛ“fSföK®"áýΪ¼uëV*d-Ì:‹íû÷mÙúȹß9Ù.>f›¥êì{v«S¾‚¬ý¼,[¶,ï}ÛEøÌ™½ÎüTÏJ®1s>7S=c™ÙöÌY<Üž˜ë5j‹‹‹µzõêûÊ(++Sgg§6mÚ¤¡¡!=ÿüóæåH*++›Q^86ŸëêêrWUU¥ââbŽŽêøñãæ¥a™ÇL%ß1«W¯V$™´}Ïž=󺶰ÇãQgg§FFFtüøqóc¿8K’†††ÔÜܬÎÎN;vL'mÌjkk‰D‰D´ÿþ´|û÷âââ´1(++Ó‰'Ì‹·>üðCÕÖÖÞWߌŒŒ˜—qåo{ÿï¿ÿn^Äæ¼'{¼¦ó¼N—óûä“OôÇ(¤eLçÀÃaÎ ²«W¯N{ûülÕÕÕ™Ÿ]ŒÝ¾}û”¶lœíùè£ò»`ÁI2×Ì”Y°›í1S9q℆††444¤ älÓƒäñxTWWgúyddDápXÇWWW—$i``@ëׯי3g&™=^iÅL»YännnV8Öèè¨Âá°Âá°<ªªªTUU¥ÚÚÚàíb¬}]gQ9“³O&C§sí™þDss³¾øâ ŒŒhÏž=Ú³gÊÊÊTUUež{<²dtèС¬'466jùòåæ÷ .¨££#çîÝ»§ññqIR0ÔÅ‹gY^^ž6›tË–-úꫯ²fVVVšÏ:{ö¬É<þ¼Ù7ÝbñùóçuöìY-_¾\wïÞ5Û;;;uøðá´c_xá555™ßïÞ½›³?þ¸ù<>>žvï7nÜÐéÓ§s^Ÿ~úiݺuK’Ô××§k×®™þt^ÛyïùÚ91zê©§´yóf­]»Vÿüó~øáݼyÓ/ß{ï=“¹yóf577kttT›7o6ÅÍ«W¯š{Ú²e‹y>.\¸ C‡Éï÷ëÔ©Sæ~ì"p8Vss³Þzë-•——g£ÌgîÂ… joo7ÇØEíéhoo×áÇÓú³´´4k^½zÕ|^´hQÖgþ·ß~3ÇØÏ’$}öÙg×Áƒõ÷ß›vvvvª³³SG555úòË/åñxîûïh6ãN&™d’I&™d’I&™d’I&™d’ù¨e~ýõ×Z·nü~ÿüd¯\¹"Izíµ×&훘˜0ûíß³÷í·ßN:~ÅŠzî¹çfùÍ7ߤýþñÇkïÞ½93mׯ_WII‰É­žž³¯¨¨H/¿ü²***äõzåóùtïÞ=Õ××›B¡}Î{ÍÙŸ™íŒÅb:}ú´&&&ôÎ;﨨¨(ï½ûî»Z³f>ýôSIÒ“O>©+V¤·aÃõööjxxØ´á»ï¾“$­\¹RÏ<óŒ9ÞÙΚšS¾xñ¢âñ¸ÉíØ±cÖÁ«V­’$UTT¨»»û¾ÚÕÕ¥ÖÖVI’ßïW¿©‚gV·íâbCCƒ$) ¥Í˜Ý¹s§z{{%IçΛq[‚Á ©´ç;ßîƒ\mÌ—õꫯʲ,¹\.8p ­ýÓ¹Æt®©§§G»ví’$8p@ÕÕÕÓ:Ïnk¶±N$f¹;Ó>¾¥¥E@`ÚýFÕÓÓcƾ²²R¡PÈìߺu«âñø¤v8Ÿ…¶¶6SÄžÍó<›çm¦ÏͲ,E£Qutt(™LJÒŒû Ùµ··kãÆª©©)èu˶±¦¦F»wïþ×tÎðð°)dUTTèàÁƒf*qGG‡b±ØŒò|>ŸùFóÛÑÑ¡ŽŽ%‰‚Ýo,“eY’¤M›6å,ÆÎô¾d¿Ø,Ë2m­¨¨Èši//FFÓî-3«··7gWWWëàÁƒæ:Ó½gÿMu_±XLêíí5í,ijmÿcƒ“ÛíV}}}ÚK膇‡ù¿%ÀC챇¡‘f¶h[[›$iß¾}r¹\’¤]»vͨxæ\Â9Ã2SOO‚Á ‚Á`ÚWæ ©´´4ç¾|mŸ ŸÏgн½½æ¥]ùØã!e/ÈJ23RûûûMÕï÷ËívO:vçÎ ƒiË5d²ÇÝþïtØÅßþþþœÅu˲´k×.3æÙÚ÷ õ÷÷+ ª««kZæB´ sgÎ ²cccŠÇã3úqWƒÁ ) 555™¥ÛíÖ¾}û$ýw½ga0“ýw[ii©ùzy,Ó¶mÛ&tc±˜Ét¹\“fsÎ%çLU{i…L­­­ÓžÅ:---¦ÐÙÚÚª†††I³E-ËR¿Lá´¢¢"çL^»ï,Ë2³=³-àv»M±< e½¿h4jÆ3×’ ÉdrÒLR狹&?-ËRCCƒYÀyü\r>WmmmYgÀN§è €‡Ãs}D"¡­[·Îè{ ÎD"‘¶TAæÚ™ÕÕÕf=Ùžžùý~S¤«¬¬”ËåÒØØ˜ºººÔÕÕ%¯×«£GJ’vïÞ­x<®ÁÁAE£QÅãqù|>¹\.%“ISv¹\êîî.èÌD·Û­††…B!% mذAÕÕÕr»ÝJ&“ækÿöRƒƒƒlIŸÏ§––µ¶¶jllL±XlÊ™›ååå:pà@Þû±Çɞ霫˜ÚÒÒ¢x<®±±1mÛ¶M>ŸO^¯W’ÒÆÅëõª¥¥eR;âñ¸’ÉdZa·²²R¥¥¥jkk3³©äóùäóùL®s)…Ù¬3;Û±¶Û•H$ä÷ûUYY)¯×k Ëv‘8ßò˜™>ø@/¾øbÁ¯›u†ì¹sçôã?Îk‡X–¥mÛ¶IRÚR™ò-]Y°³ [¶H$¢ÆÆF¹\.Y–¥X,¦h4jŠ~öË¡œ3V ¥©©É“ɤºººÌWùS©”‰DLÛ~ù嗶橽né¦M›ò. 
`E#‘È”kg3ßlãÒÒRuww›™ ‰D¬=k‹ßïW(štͦ¦&S¤¶9—š¨¯¯W(Jøèï8IDATËîéé1köº\.566š™×…R__¯¶¶6Ó×±XÌ´+™LÎ[»eË–-SIIIÁ¯» •J¥279rD}}}Ú±cǬƒïç…Sv‘Ñ.À¹Ýî¼EQç,B¯×›¶îªeYi³Gó½ kppP–eÉëõšÙ“S]/ߌE»2Û4“¬ááa3ëÓív«¼¼<í8çýù|>S¤œÎµg2–ccci³SóõO6‰DBuuu’¤p8<­s÷.ý·@î¼Ç|ײ‹Ó¹î¦ÙSõg®q˜ésã|³7Œ… jéÒ¥¿îœd§½{÷* ©¼¼\‘H„À¼š¯‚ìct=æšóe^™ëóáòå˺yófÁ¯û]¹‹Å´`Á³þíT/ó ©»»[7nTMMMA¯KAs"«££#mÛ¾}û¦\ÿx”e-È®\¹Ro¾ù&½ƒYs^½^¯™ €ÿyY ²«V­Ò¢E‹tçÎz³dY³b€ÿÃK½0§(Æÿ‚,€ÿ9K–,QIIIÁ¯» •J¥²í¸|ù2Kx$-\¸PK—.-øu³Î=räˆöîÝ˨ÀÄ’P d @žÈ·sxxX¥¥¥iÛ,Ë’eY“Žu»Ýr»Ý“ÎφL2É$“L2É$“L2É$“L2É$“L2É$s>3O:¥õë×ëõ×_Ÿÿ‚lQQ‘$éûï¿WkkkÚ¾3gÎèèÑ£“ÎÙ°aƒª««Ó¶µ··g½(™d’I&™d’I&™d’I&™d’I&™d’Iæ|gŽ«Ð¤R©T¶W®\ÑíÛ·µjÕª´í7oÞÔ7&¿xñb•””¤m;wî\Ö‹’I&™d’I&™d’I&™d’I&™d’I&™dÎgfQQ‘–,Yòï)È,^êBA „‚,Y(ÿµ}?;á@ïIEND®B`‚ceilometer-6.1.5/doc/source/3-Pipeline.png0000664000567000056710000013312613072744703021511 0ustar jenkinsjenkins00000000000000‰PNG  IHDR R`CObKGDÿÿÿ ½§“ pHYs  šœtIMEß  2ƒÇJ IDATxÚìÝ|Tõïñ·&f2a†ü$™!˜ “t –° n ( ]AAº’ªX…îcmq½ZVí]õ¶BïãÖßm÷b•âþ(X~x]°pïâš Tƒ ù&$ÌHHläþ1ÌÉL2 äÇLÎëùxôÑ33çÌ9ó9“€o>ßï÷²sçÎÓºœæFH˜!!`rÑ=½ØÐР††ªD0‡Ã!‡ÃA!@·º ÷ï߯M›6Q!`X¾|¹¦OŸN!@HÝ7¦ƒ>øû=èItovº¢µU1mmT ˆ m11úÚb¡à‚z&54ÊY[Kµ€Rëtª>ÕI!À±º1`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`rÑùæîyó”Q4G’TµoŸ>þÝÖ^7ç$IÇvíVÅŽý~]»]­OÐsIn·ò~¸J’tð—Ï©±¢¢Ë1±r¥Üóæ=ÿæ=+z’Ôtì˜Z=ÙœN=Z‰îLã8ÿ>’T_Vò¸Püû¥|+Wm^¯?­èq?›Ó©ÄŒ %ïVë>( 9ü4ÉíÖ#FG$'ŸÓ¿¿Ÿ¯¿úªÇù!“Ün%çæé«Jöõ¸¿ÿ<Ÿ)ðþ4~Z¡“‡]ðóöö¾'w׿­«W}YY¯ö°9JÎÉ‘-%9è¸ÀûØÓÐ^›Ó)WÁtãÜÝÝ‹Î÷Äÿ=ëm]üÇùïç#F„¬q_B´ðdi©>ÞjÓÕ‹n½äaÇ®ÂBå­Z%›3¥ËkÞÚ:xæ™ !Á‰™™úÎù“ýæüÏ_Hò…—‡6n Úç­V«®´TEE]æ0ô'ù†‡:®³¼U«”1·¨Ë0àVGåo¼¡²›ºã_m¹ób)’”]ì;ö¿ž!hõ缿ý¡$I’{Þ\c5fÿhÿ>õee!‡§äæ*íÚ.µÍ..–·¶NŸ{NU%%]Žó×å­VK’þòÉ'B~ÞÎ×Ü[»]oýkeÝzk·Ã©ëKKõÎc? DZìö‹ÏHÒ¡U_ZfÜÇPÃÇ»»ÙÅê±.þz—mܤ“¥¥!kÛ›{™ävÕØÿ=s*íCÖ¤§ë4¨s~¸i“\…²¥¤(§x¹ªKJúÜíÔyñ'˜Ñ!æLÑw~±AžùFèÒæõª¾¬L#’“eKI1Ž“¤3õõÝžëL½¯Ó,f„Íè&ìË»]µa½1¼Ú[W§úóáNrn®l))Ê..V\rŠ<óŒq\JnnP¨øý×b±Û•¿öÇjõxŒ¨ñS_-ýá’·®N_õðù:sÏ›§üµ?î±¶³~údPm;›kÜÿù{ºæÞê\ǯêëkKÌÌ4V™žùäÚ}>¨ô³9Æb.þãO=¿2uN޲‹‹/ØUùWÖÇûëc·÷º.IãÝ}ª‹ÿ^ú?[›×k uoóz%ùÂY?}ÒxîÔÑ£ªû T)ßÊ º®½>FPz4¨!a«Ç£O?c`}vœ@ª8ª½=4Ä4ÉíÖô‡V¢;Sùk¬ÆO?UcE…+*´ûï {³€GÅŽªØ±#(´ëËÂ×.ï˜ñàsÏéãßm5^³Øíš²j¥2çúºý>ÛµËèË_»V’/øyëÕ]¬À0ïêÅ‹ŒÈ¿êòßìÝ#I:ºsW¯»5}„?î¶¶6§S³ž|Ò¨mwA_Nñrµy½zçÑÇ‚º*­ìâå} ­ÜóæuüxëÆç umɹ¹Jr»ƒj6= Ó®s—ävkæOŸìvžLÐk±ÛC~.‹Ý®ü‡×ÊUP üµ?–·¶6d7©« à‚uÉ[µªË½œó?¡äœ:z´Ëw/oÕ*I¾ÐòG3:(môÕä¦W^VŒÍô¾¡ úÂ%u¥¥úxë’:V;î­)?ô…"m^¯ÞZ½ºËtÚûØcF§Uö÷‹‡¬°»]W/ºU’ttçΠ€Pò¦ï?÷¼q­s‹$ùÂ:ÿpÔ÷Ÿ{>d‡[ÅŽFGcàÔK1qñ"£¶BIòÖÖê­Õ«ëõ¶P:a’TUR¢£;w÷½/²nýkß5ÔÕu ý×V†&ß±ívC„>÷\—N¿ÆŠ ½µzMçîÞêsùƒoã;×ixz_êjø|OüûÛ¹«Ëkom­Þîy_×âÉúW¼¸|(Núá¦MòÖÕIêýjÇ6§ÓØïhˆPÄÏ[[ktM¹ †,qÛŸœE;kõx´ÿégtð¹çŒ}êJKõÖ«/8D´îƒÒþ½Þ‚I¾Ðª»@Z=ݹKÒùáÒNg—}NU ÙI'ùñëË}y籟è­VëGëvŸÀU§ã¨É˜;ר>vþÚC}güA]g™E¾ð¶¾¬¬ÛÏÕêñÁw¨9}Ÿ½®WuIéæøždÝú×!ëY±c‡þeÉRíþ»úeÁ0| IHèï¾òËxíIÌìXa¸úC'O– yÜ` ªzšï®ª¤DÿnkÐ>u¥¥ª*)éìXÎÏ—S¼Ü¯úC`0X»Pkª¶m_y»=6pþǾÜÿÞPuLr»•9w®®½óÎÇúW…®/+ë1( õ¹-v»Ñ­×æñ(9'§Ûÿ]¦s!ëé×ÓÜ'K/.ð ì̼mÛï5ë§OêêE·† oz=T'ö;îíjlj݆þ¡Ýé®n0¥|Ëõe¡“@»]ir‹r † Õ.°ö‰nwŸæº»Ôû’’›«´‚%wkDrJ¯†çŽ|Ñ×dº ƒºCÃÁûÏ=¯$÷xcQÿ5æýð‡òÖÖ©ª¤DŸüó?‡ÅÏoÑCyò‹]í¸/+öF"÷¼yú‹•÷‡BZ_V¦ºJeKIVfÀPÚÁÒöÕWƒ~N‹Ý®™O>2(õ¯}²ìPÐÊÌ~}ç¯?\a³ ÊyZ=m¿ç¹çÍSZa1dÜÿ¹¯^¼HW/^¤·UŸ{Žßv [Cöeµã¯†±&ffv;¿[¸ém'›_’Ûm„]m^¯ŽîÜ¥ê’yëëƒ:Âú²àK² £>Õ‹@·?„õee:¶sW—U„»b[_VÖ«Å].ìu^:œøWá–Îw[*%7×è0¼zñ"5;Öeѿ衾€PÃŽC9õiG es:¥BÂÑgš<`ŸëƒR%çäÈæL‘Ånïv><÷¼yJ+˜®S*Û¸IV ~ëÕÝvVúçÚëÞ€ÎÌѹ¹=°aÜ׃PÛÀÕ‰«öíÓÞÿöhèëJÆzkk•œ“£„ŒŒÏªK1°C5·å…tþnÕ•–×íž7ÏœÓ  @·.‡‹è¼Úq(u¥¥Fà—Q4§Ç÷ó/êá­«ëÕðåØe×Ó\vi…r«ðúC¸SG{¼öÑÙÙýv­ÞÚZ£þ®Â‚÷õ×¾Íë”nΘ€¿OzèäÝÍœU%û$ùÂ4÷¼y!÷±ØíÝ~§ªöùŽO»ÀJÙÓ^«¿Ù»G·mûý ¬¨í*,4Î×]eÅŽòÖÖu©#@gav^í¸;Gwî’äëúê.ð¹zñ"c:ÿþ¡ tSURbo×ÞygÈó¥äæóÈù¯Õ„&º3»½Æüµk{uý} †üçOr»uõâE!÷qÏ›gtÜõTÛ’ÖMØšävkâ­Ýí}8UqT’ô+ïï²ò°ÅnWþÚw[Ïê€ñ/V®ìöüþù!›ŽëqåþrêèQc»Û•Ýnãg¡ñÓ 
t':\.$pØqw>ܴɘk-íuev¶ŽíÜ©¯NžÔˆÑ£uõâEF×Þ©Š£]VK>0¬væ“O¨¾´Tõ¥eÖçŸoÑæLÑ/¿¬C›6éTE…®1B)ßÊUÖ­¾ÏÚæõªü7$ùB)p8óÉ'tð—Ï…®‚eΛ+Wa¡Ú¼ÞnCÀSG•èÎôuÇ;§¯¿òê“7þ¹ÇðêÐÆ[X¨Dw¦òV­RrNŽ>þÝV£¶sçÊ=Ï„yëêôá¦Mƒö½ðÖŒ¢9:UQ¡ê}ûÔêñÈæt*³h޲n½U»½Ûšìúi}çd±Ûõ_l8V(.9E)¹¹²9Sº=¶bÇcQ÷¼¹JïÖ'[ßèö>ü_¿:$çä(íZ©¯ÓÑ]»å­­UÕ¾}ÆuIÒ±;õõW_éŠ#ds:•½|¹q]Ÿœÿ~„N¸Úq(­ÞZ½ZßÙ°A‰îL¹çuWªöí Ù™8%çæ*97×·Zðß=0 Ÿ§®´Tžùš²j¥lÎMxm—}¼uuzçÑÇŒ¯bÇeÌ-RrNŽ’ssuÓ¯^ ùùŽîØ©Y?}R’oèiUI‰ñúñ’£Ñߨ›0ô­Õ«5ó§O*9'G®ÂÂäëËÊ‚®w0¼óècFȪ†m^¯Þz`µ²¿_|þÚ ‚âÆŠ ½õ€ï³ÙRRº|¶·¾¡6¯·Û¡îž~Fm«V*sî\%¹Ý=^Coÿøw[…Wüßõ¯êOª¢¶Vž~F¶ ü,´y½Úÿô3A‹Þt6 !á™úzÕ—•IêÝ"þaÇÙß/6޵Ïö{îñ-øQX ÄÌLÙRRÎÏá÷©ŽíÜÕmæ­­Õ[¬ÖÄE·ó¸ù‡a¶y½Ý^kàkõtœä ýêJK5ñÖ[•4Þm>õeeª*)ѱ»ºn»ÿî¹çÍ3ÂBI!?_Õ¾}Š±Ù”èv…„‡6nÔ×_yƒ‚0ÿµù?o¨á§­qîεõÖ×éèŽAç ä¯AOÃZ/T«îÔ•–êÍ{Vhâ¢[•œ›+[НóïÔÑ£A5´ î¼ GcE…þeÉR¹ •èîXô¥º¤D=®ÝêñhÿÓÏèØÎ]ʘ[¤$÷xcåàú²2Õ}Pªò7ÞœöTïÞÔ¥ª¤D{}L™óævétìü³c³)9'ǨMO×è²sçÎ õ¶mÛ´}ûvIRrM­œt"!õ´²t ìâbå/W›×«×ç/ p`X¨u:UŸêûÇñùóçkÁþžB»œ`8˘[¤¿Ù»G7ýê•{ñ¯ˆÝ]Ç(ÀpFHˆa­þßÐì$·»ÛÕ‰ó×®5Vö¯f `&Ñ”ÃYcE…êËÊ”œ“#÷¼¹JÉÍÕ±];%IWŒ°ÉUXh„Uûö©bÇŠL‡ÃÞ;>¦ü‡×ÊUP ›3%ä"%o}CùKŠL‰Ã^«Ç£½ÿíQÙœN¹ tÅ›bl6µy½:S_¯ª’V¦FHÓðÖÖêãßm¥°p `r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&GH˜!!`r„„€É&M  ï¼6›±Ý¥³#â( ì|e³ÛŸ~ú©¶oßNQÑ&L˜`l[­V¹\.Šý„zÐbµÊk·éŒ5N_[bÔ2b„Ú/§ yÊËËU^^N!0ì89¹\.¹\.M˜0A‡ƒÂ@@€6‹E§IòØlA`|œEΔD¹RгZ4ÁjgµÈ•Ê_DZùÑc»¥¥MU'¾TÃ)¯¾lôhÿ¾µœm•$#4œ0a‚òóóǨ¸BB¦×f±¨9~¤=ÿHç•ñº.+MYî1r%G¢B ±¬ÌԠǹ“Ç=®ªiPÕ‰9Z£ò£GUZZª-[¶(77W¹¹¹ÊÉÉ!0€n0-¯Í¦ºT§¼vß|MÎ+ã5cú$åçWœÕB"Œ+Õ!WªCÓ§úæ.l8åÕÿ}§L¥‡}¡ÕjUnn®,XÀdèä²sçÎ õ¶mÛŒ‰“kj嬭¥Z†…æøx}‘nœÚ,1J°[µüÆoÿÊ €áÍ•êPñ’Y:ÓÒª·ÿø¡Þ~÷¿tàÀM˜0AÅÅÅt0=:  {m‹N¤QsB‚âm±ºå¦i„ƒ&×VËÙV-X°@³gÏfÎB¦E'!€aí‹ÑWªnÌE[b4V¶åQ(ÎjÑ‚¢<Ýð—×jË¿½§mÛ¶iÿþýZ¾|¹²²²(Ó¹œŽÚ£¢T1a¼N¸\7.Eë~|!ºˆ³ZT¼d¦ÖÜ?_çÚ[µ~ýzmÙ²…Â0:  ;^›MŸw+Ú£å]ÈÐb\PVfªžzô½þ¯ûôöÛo«¼¼\+W®d®B¦A'!€a¥>թЬ Jq:ô£¿½™€}rû-º¿x޾<©'Ÿ|R¥¥¥€)6*ÓÓUët*?o‚úáB¹RùW_ô]îäqzìÁEr$Äé…^Ðþýû) €a@ÄkŠRù¤ktj”CËoŸ¥â%³gµP\4G¢M=x«òó&hÓ¦MÚ¸q#E0¬1'!€ˆÖ¥Š‰Yj‰ÕòÛg1¼ýªxÉ,IÒ|‹‹) €a‰@Äò„çFÚ´æî¹ÊÊL¥(èwÅKfÉ5f”¶ü›oØ1A!€áˆ@D úáÍÌ?€uÃŒÉ²ÆÆhÓë{%~ D¤Ï'ŒWKl¬ÖÜ=—€ƒbúÔ jh<­íoËåÒ 7Ü@Q „„¨ÍbQÛWH’Zâ¬ú&:ø6ž½"F_[bºg÷z»$—ËEQD4BÂ!r©¡ sl†$iLº»Ëk©ã2CÓvö¬¾¬;ôœ§¹Q§›åij”÷tSçôÚíA݈±gÎÈîñÊæñbÀµY,ªëRæØÑ*^2‹‚`HÅY-zpå|=òß«ÿI=öŠ ¢¢æøx5%$¨9!¾W¡ ctªRÓ342q”ÉNYb­•<撮᪬ž»¯Z϶èËúF x¢²B õ5jk=Ûeß³qq:§/’GKò…†Ž†Å7ŸVLk+7ýª*ã*EÇÆèî¿ù+Š€°gµ¨xÉ,½°q·¶mÛ¦ P‹p€µX­:9zôƒAÇèTJIÕ¨”49’!;ƒ%Öjœûª¬Éšª"I’§ù”¾¬;Ñcpx6.N'ââtÂåšœÔШø¦&: qɾH-O\œî¿c¶‰6 €°‘;yœfNÒöíÛ•››Ë°c‹p´Y,úâÊ+Õœ ¶« KRŒ%VWeMVjº[WeM–%ÖÖŸÉŸ({|bPpøeý }RúÿTSyL 'k‚öïšœ®ø¦&Å75+©¡/ú¬ÅjÕ‰´4åLJWîäqagAQžJ?ªÔÆÿ¤|Hqqq@Ä!$ìG‡N޾Rg»ùÁ1:UWeMÖU'_ò°áp0*yŒ ‹¾+É7Lù³òTSY¡ÏÊ? 
ê2lNHPsB‚ªÓÆ(¾©Yκ:†#£×N¸ÒÍ<„[qV‹Š—ÎÒú¶ëí·ßfØ1€ˆDHx‰Ú£¢ôeòh5$9Bv ÚF&説ÉÊ™6KöøÄa[K¬Us¦jbÎT#0ü¬üC}~ä°±Ï7ÑÑ:5Ê¡S£Jü²AކÙ¼^¾Dè–׿[ÜgþÌlÅY-a++3Uã3RôöÛÿW7ÜpÝ„"!áEò‡ƒõW^r®Á¬ì<]•uí ŽCOó)}Vþ‘>)ýAC’ýa¡ÍãQJM-a!BªÊ¸J‰ñqZP”G1ö¾¿t¶ùï¯jË–-*..¦ " !aµY,ªMI ¹‰md‚r¦ÍRVv^ØÏ18Xìñ‰Ê¾n†²¯›¡•úÏwv©öø1ãu¯Ý®Š,»bZÛ”R[˼…04:j½â ­ºc6Å@Dp$Ú4»p’þ½ä€,X ‡ÃAQD BÂ^jŠRuZšNêúKÞ62ASgÎÕÄœ©ªcÒÝs§[žæSúÏwvªüÐAãµ6KŒŽKWÓ©±ŸNg!?o:áJSæØÑÊÊL¥ ˆ Šòtà`…6nܨ|‚ˆ„„½ðÅè+UëtvétŽÍÐÄùßݵ IDATœëûÈŸ¨Ù —jê̹*{o¯Ê4:i³Ä¨"k‚lÆVg“jåP{T”–.šA1Qâ¬-(š¢-ÿ¶_UUUr¹\@D¸œtÏk³é“«'ê„Ë:Çfháßܯ[î\E@x ìñ‰*,ú®¾÷·*oÆÅXb;jo·ëO“'©>Õ©ö¨(Še2 IIr^/W*Ã3yòóÆK’öïßO1D : CðÏ;Øyh±md‚fß¼TcÒÝ©Yb­š:³HÙßþKýç;;õá–¯Õ:ª¿òJ¥UŸ`¾B“h±Zu6.N3¦O¢ˆHqV‹r&¥ëÀºýöÛ)€ˆ@HØI}ª³ËŠÅ1–Xe_÷—š:³ˆ K¬U…EßÕÄÜëT²ë_N¾‰ŽÖñqéjt$iLUµ¬--kk8?¹³ÿ__€H4}j–ÊWª´´T¹¹¹@Ø#$<¯ÍbѱŒ«t6..èù¬ìH@üó~ïo•ctªñ|sB‚Ož$¯ÍF‘†‘G’âm±„„þjfŽ$éÀ@Ø3mHØbµêÓ ãƒ'±LÐâk”}Ý ¾aÆŸ¨Ûî}P×N-4žû&:ZYTŸê¤@ÃÄWv»®™8–B`Xp$Ú””hWyy9ÅöL9Üø‹ÑWªÖé êÌÊÎSÁœ[d‰µò­c…EßÕU¯ÕÎ-ÿ¤¶Ö³’|+ ŸŠWƱÏÓÚJ‘"T‹Õªö¨(MÈL¥6²2*=|„B{¦ë$¬LO× —Ëc,±º~ÁÍ^¸”€0BŒIwë{û¨œc3ŒçÎÆÅ铉Yj±r#UËù9Z\cÆkÌ(µ´´¨¡¡bk¦ Û£¢ôYfFÐðbÇèTÝ|çJMÌ™Ê7!ÂXb­ºåÎU*˜s‹ñÜ7ÑÑútÂx5:™"Ñ™ó¯+•û€á#-5I’TUUE1„5S„„íQQútÂx5'$Ïeeçé¶{Ô¨ä1| "Xöu3´xÅÅXb%ù‚ÂããÒ #PKœUã3R(†•¬óÓéwÃ>$ô/Pr6`¹ù¼s4{áRîþ01*yŒn»÷¡ ÕK׉´4ŠA¾²Û¿@ÃÉøŒ/ö†uH* ¼~ÁMYÄfìñ‰ºùΕAAáÉ£U™žNq"€×f“䛯n\©UWWSam؆„Í útÂø Jæ.þ>óc–X«n¾s¥²²óŒçNrè“«'ª=*ŠE«5†"`؉³ZÔÒÒB!„µa6:ú,3#( ¼ùΕº*k2w|˜³ÄZ5{áÒ  ðl\œ>0ž 0ŒµŒ8¿²1‹–`r$”ļ„ÂÛ° [¬VU§u,Fâ_Á˜JÌeöÂ¥A+ûƒB„§?_î p㬊€aÇ‘ä›^çÌ™3@ØV!¡B!¡¹e_7C×/Xb<>Ç…!D—Ò9 Œ±ÄjÞíwÉkå.›˜Ê=Û^“䛣P’Ò++)º(?Z£÷#ii¥ÀÉrÑüïü…Â̰ Û,–.áÍw®”=>‘; MÌ™ªšÊ •:(‰ 0yí6ÏHòëxûªì0ß ` 9Z«ü¼ r$Ú(À4²2S}9¢¬¬,  ,Eüpãö¨(˸ªK@Èc꼘ɩQ5:X$Áè Ç—§)f"º“°=*JŸN¯³q¾•Q ѓ٠—J’ÑQx|œo~¤†Šƒ.n›ëRZ2ÓýeËÎ*U×·P LEtHX–f„’T0çBôhöÂ¥:ÝÔ¨ÚãÇ$ù‚Bë™3²¶ð®––lUÖ8;…ú‰ÕE€0±Ã¿HmÌ-'I×/Xb,RôdÞmwÉ1:ÕxüYf¦Ú£øW`^¶X­:‘–f<ÎÊÎ# D¯Yb­šwû]бÄJ’Ú,1ÆÐc3Џ°=*JŸef£S¹æ€Þ²Ç'}ošTëtR`J—®6KŒ$ßB%ón¿‹»ˆ‹rUÖdå͘c<®OuÊk³Q`:Ö:jNH0Ï^¸TöøDî".ÚÔ™ErŽÍ0ËÌ`~B`:zm6Õ§v ½vj¡®ÊšÌÄ%›w[Çü„ßDGë³Ì ŠL%bBÂããÆÛα*,ú.wýÂkÕÜÛ¾o<öÚíú"y4…¦!a­Ó<ámÌCˆþ5&Ý4?amJ ÃŽ€i„}HØf± 3ž:s®,±VîúÝÔ™E²ôÍyùMt´N¸Ò( 0…° §5¶c3”}Ý îÌì›—Û«Së°Ñá×n7ÝÂÀ“îVVvžñ8p.L€á*lCÂö¨(U§1_;µP£’ÇpÇ0à æÜb¬vÜf‰Q­ÓIQÀ°¶!a­Ó©o¢£%I¶‘ š:s.w ƒÂk ú¾}1úJµY, [a¶Y,ú2y´ñ¸°è»,V‚A•}Ý 9F§Jò-b87&Àp–!a3ÅØ7a’®ÊšÌ ›}ócÛk·³ˆ ¶Â.$l³XÔèp§Î,â.aHŒJ´ˆIã(EÃRØ…„]„α,V‚!87a£ÃÁÜ„`X «.B„{|bP7a`ˆ 0\„UHع‹pLº›;„!—•3ÕØ¦› GÑár!t"\IwË96CµÇIò…Ùc?¯¤0è“-;«TUw†BÀ´ªë[:~þí€â¬1ýúþqV‹æÏ™"W*óÇ#lBBºΦÎ,Òïÿ÷ ’|Ý„)µuŠim¥0蕪º3zû?NRà¼êš†y_klŒŠ—Ì¢ÀÀE‹áÆíQQt"¬ù» ý˜›ðšããƒ~®ÍìÌÙvŠ ‚/OSà"…E'a㨎 Á1:•.B„¥Àn¦øx¥$ÝjNHÐg™¾PÕc³)½’áÙ~iÉVÝ6×E!€~R]ߢ-;«(p‰Â"$lHJ2¶³¿ý—Ü„¥1énÙF&È{ºIßDG«ÑáPRC… áŒÕjlŸ:ÿ…>VK”²ÆÙ) ¬ ùpã«UgãâŒÇWeMæ® lMÌ¹ÎØnJˆ§ ½tj”C•éé€05ä!aCÀœeYÙy²ÄZ¹+[s;BÂÓ jŠ¢(½DP@øòðtB‚±}Uֵ܄5{|bÐ&ói"4«¥#H%( < iHØœ 6KŒ$É62¡ÆˆCŽçÓDh7L­üœŽ0• €ð3Ä!aÇœn„ˆßÕ³qqj³X(Êß2Ž €06d!a{T”æ#Ì™6‹»ˆ`‰µ*+;Ïx|òÊ+)J/¾†,$ôÚíÆ¶ctªìñ‰Ü DŒÀù3çÕDÏ OCzl6c›¡Æˆ4WeMVŒ%V’Ôf‰aÈq„ ÏR?†Ô…„ÝW©ã2¹ˆ8©éß[o@è ë¶ÇĨîd…`ˆ IHØf±«KÒ˜t7wg̸ñƶ×NHØWƒB÷¬6¾¶—Â0†$$ ìºrŽÍà. "¥¦g|§íä"t 8î‰øÉæHN5¶Û®¸¢WÇüùò(óÔ'¡ï]… =`p z'á×Ã-±Öˆ(Ò¬97iïî7µù×Ïu;ì¸;ï¿W"ÏéfÙGÆkÂÕ“e^ZSU©ÚUš2­P凩öD•&\=Y©®t•>$IÊš”­šªJùø£.ïÓÝóùßÛsºI©iéš2­ð‚×î¿¶Îï뾦ºò‚ïå¿>IÆçêŽÿí#ã{u}ábdB’j“$µŒˆ“ÍëíÓñù9åç:†å/˜¸Ø(¹Râ.êX: €Kó /¨´´”BhþüùZ°`Á°ø,ƒú-‘"g¸ñϾ¨ &ë¥_<­¼i3”5)û‚ÇìÙµ]?âaÕTUÏÙãôÐOžÒÂÅß3žûýÖÍziÃSZÿÊoµfÅRI’3m¬þ°ÿ°~öøZIÒÄI9Úüëç‚Þç•×ÞÔ¶­¯=ŸêJ×+¯½Ä•>¤5÷Þtþ÷Xÿò«=v ú¯-pNÀŸ?þpÐ9ýç]ÿò«Auñ47iͽwèàwƒöÍËŸ¡õ/¿:†Ú7kR¶¦|;2‚B{|’±8œ¾· 1Êgç7k…ÀÅ# ÞÛo¿MHx1vpŒN˜"Ùãôij/jÍŠ¥Z÷ÐýzmǾ÷ß³k»Ö¬Xª ×\«õ¯üV×Íמ]Ûõâ†Ôºï—}d‚®/št̺‡î×w­”=>^©icç߯DåúPë_ù­²®¹V{vo×ÏXkî½CçÎëòüÏ_« ¿zM’/|[±ä&;wÎú<ÍMúýÖÍúùãkÝC÷ëÍ}õº¼«Í¿~N -ÓÖ=-{|‚xW«W,ÕŠ%7éVûúC¿¬þ{-\´LRGè¸î¡ûµþ•ßvÙ÷¡uOká¢eª©>®Ÿ=¾V¯þæùˆø~¤ŽË”Îç›ÃéÑ? 
{gËÎ*UÕ¡03gÛíêšF=û¶9OœÕ¢ùs¦È•ê è0´´´ ›Ï2¨!a{tÇébbc#ªP×Í7†¿¸áußêGºÝ÷¥_<%IÚðÊo®¾ë‹æ+ëškuû¼ýü‰‡»„„³¾s“~ôÏ„|¿'ž}ÑØÙÝ«´ù×Ï«¦ª2¨ÃÏÿ|퉎 ®üOÊ9Æ¥…‹—ûÙã´ìîUÚ³k»Þ¯¤O58øž/ [¸x™Ñ ˜—?C÷­þ{yN7«¦ªR©®tíÙµ]¼«‹–Õé¾Õ¨¦ê¸¶mݬòÇ”5)[å‡ûú‡rgÅ'hÃ+¿Õ“å=ÝößÀaó__䪾èAaϪêÎèíÿ8I!À_ÐζéÈÑÚ{kl ¿wký½÷R ­yùåa÷™5$Þe¨pjÚØ à//†^ß¹?hOs“Ž|üQP˜Ø[Y×ø®wÝC÷kÙÝ+5åÛ…Êš”ÝežFÿ°á…‹—uy…‹—iÛÖÍÚ³{»²&ekÏîí!÷µÇ'há¢eÑM8l>p8=úAa¿[:© œ/OS„UUÓ ²Ã• ŽD›r&¥+ÎÊ?˜@$Ôðë€ÅžqÅêͰc@–šzެIÙÚ»ûMy:uÈÙGÆÈ5×TUêÕß¼ ò?RùŸ>”§¹é¢ßëú¢ùúÁê¿×KžÒÏþa­Q“ëçÌ×xØEËÿä[peÅí7^ð==ÍÍ=Ô;>b¾¶‘ òžöÕ¶Åj•uµ‡‚ KK¶ê¶¹. ô“êúmÙYE!"Ø™–Výtý`³ 'éö[ (D°A [cCÂĈ,XçaÇùÃ>Ïé¦!¿ÖšªJ-¹±Pžæ&M™V¨;îº_Y×d+ëškõ“ïëópcÉ7dxÙ]+µg÷vô—†¦6ãïc€È7¨!áWöŽî–Hœ“Ð/pØñÞÝovyÝßixðÀ»A!YùáCzÿ½M¸æÚ.ó7cçn>ÿuôÕ‹žÒ¶­›õÚŽ}Aó)úç*ô»¾h¾^ýÍózõ7χ\ÅyÏ®íZ~Q—ëçÌ×ϸ˾žæ&m{ãU~BÑ-‚B@_9âc´`V*…úIùçBBF.§Ç?ì8”ûV?"ÛÈx­¹÷½´á)½ÿ^‰^ýÍóZ±Ä·ÿϾ8h×(I?âa½ÿ^IÐuØÎwð•>Ôë÷[v÷JIÒš{ïж­›õþ{%Ú¶u³~þÄòŒ×ÂE¾ÅGòòghÁ¢e:xà]­¸ýFíÝý¦Þ¯DkV,Õž]Û5áškkKu¥ë«ÿ>hßm[7kÅ’›.iþD˜Cñ-㔟ÓÑrààm|m/… n| üÃŽ½†ÊfMÊÖ¯^ÿƒ~öøZß¼…|ÏO™V¨­{¦ÛU‘ûÛÂÅßSMõq½´á)ÝsÛWçsKêqˆµßòÇ”š6Ö¸ÎýÃ3|qA…\BÂAÐS6XbÄÞ~®¾œ{°º,ûÛ¨”1ÆvKœ•/ý (àâ Úœ„^›ÍØvŽÍ òVbbcíö¨( 2D˜£€‹ÃÂ%†‚BúŽÀ°S|Ë8ådÅ<¢ò£5€nvªêÎèÈç^ãqR‚M®T… „„†•ªº3zvãµ´¶K’¬±1ZyW‘⬊@7-$ŒùúkcÛÛ|ŠÊèw¡ÂW. ‹€ ¼°µÕØöb˜i¨¯5¶­gZ(È àâ1Üè­-gŒí¨öv 2ȸ4„„æÅ ÿ¨oµëàw/ú=ÊRMU%Åİ@@À¥#$4™òÇ´d^jªS D<BúÇ †„W0/áóœn¦è?уy²˜¶6}m±H’N75ȟȸ€òÇtä㕚–®)Ó {Ü·¦ªRµ'ªTS]©Ô´t9Ǹ”êJzýÈÇJ’Ž|ü¡.»ì².ïY~ø¼žÓÆ{L¸z²ìñ ܈ ø²þ„±×ÂÂ%€z¿Ù½[}þ9…€éýà?°÷ž?¾,X0(Ÿ#š[ž<ÍMZ÷ÐýÚ³k»ñ\ªËÚ…Úwͽw„œ§pÙÝ«ôк§%I¿ßºY/mxJ’ô³X+Iúà¸G’/\sï]æ*´Ç'hý˯*/7¥­g;‚Á¨?ÿ™‚ B <ïí·ß´pP‡Û<^c»æó£Üéüìñ‡µg×vÝq×J½¹ï#½¹ï#Mùv¡öî~3ä¾¼«‡Ö=­?~X¥Ž{ôÊëЄk®Õæ_?§òÇ$I -3ÇÖ=­W^ÿƒ$_ȸbÉM:ÝܤW^ÿƒ>8îÑ?¬ÒãϾ`„•èYC}±óõ×d€ œ”­Ñ³/lÓö·þ‹b-ƒ8ZqP; £¿i7¶=ÍÜénÔTUjÛÖÍš2­P?ú‡gŒçŸXÿ¢¾÷®j;-:RS]©)Ó µìîUÆsyù3´ìî•Z÷àý:øÞ»Êš”­TWº²®É–$e]“mtÖT—sŒK /3ž³Ç'háâïé÷¿Û¬÷ß+á¦ô õl‹ÚZÏcæÞDÿ! @¸Ùò¯ûU]Û¨#GkõeÃi/™eÚZ¬¿÷^¾@?Zóò˃~ÎA ­_1¶O7vgÏnßãë‹æwymáâeÆa¿_mÙÑe¿òÇtäOIºðb%Y“²õúÎýAÏyš›täãT{¢ŠróŽðx(È „ûŸSeå,óihj3¶›¼ÖMçH´)gRºâ¬–°«AumÇ×8xD’Lˆlƒ»pIÀ0ÌÀá™æõü]RÓÆ†Da/´íX´$êϦ ªë[´¿ôK-˜•Ú§ã |•~ô¹ZÎúæ!Ûôú^I"(<ÏÓç?ót¯üsOD…„Å·Œ“$‚BëòÁ>aàpÌ/몹!Øãô£uO«¦ªR+–ܤ½»ßÔÞÝojÅ’›tºÓ\©ice¯Wó‚¶mõ­D¼w÷›Z2¯@'ÎÏMÊ«¿yA/mxJ5U•ÊË/”$ýü‰‡µw÷›zÿ½½ú›çµbÉMƼ†¼ËéFÃÉŽù5m^o¯ŽIhjÒ¬‚!„·3gÎ=Þôú^íÿÏ#ä ós:þÞzààm|m/…½“0p8fMå1î@7.þž$éÅ Oiõ=K$IS¦ê‰g_ÔšKýR]ézâÙõ³Ç×ê'kî3ž_°h™^yíMÝX0Y{w¿©ûV?"IÊËŸ¡Ys|ÁcùáCš2­P O5ÕÇõRÀ¹œicõÐO|«(¯{ðþ ÎCtjÜ—ù­--šôÑáa[—Z§Sõ©Î>G@‘‰ŽBèÐ]G!„»A ã›::áNÖ°xI.þž/À«ª”}d¼1ìøƒãž ý®/š¯ë‹æËÓܤšêãACßý¨k·fç¹ %é¾Õè¾Õ+#§ºÒƒ®¡}öɇÆö‡‚\Bˆl…Ð!TPxîÜ9  ¬]>' Sj*r. 
Õ•n„=±Ç'„œ›°¯ç ѳÀnX{/‡£+BˆL³¯»RiÉÿØËÐcèÐyèñe—]¦ÒÒR  l IHhót„)Ÿ•È]@Dj=Û<!„…€"Wœ5ZO (€nt «««µqãF  , IHh§“Ã@àww„Ç´r7z‡€"_\,A!þ?{÷å}çûÿe´( ›V!M=1 Óͦ§Ð“eŒ{Åô|¡âù®¦izq›h“þ”ìÕ=9ÝËál›sº'þÈ›4¦©°ß¦¦˜ëý®º_ÃPO ñ8í†i¬C1ZFqq—Ã÷ñ¾½ç'‚Éd= IDAT3ÏÇuåŠÀÌýã}ßsÏðâý¹?≘̤«‹ @Rº5„–a™ÃCƒäH`Ö9ëùÝõsz˜¡Æ“ÞvÛB˜¥  >‚B³Ám·jÅ‹,˜œõ|Ì‘À¬r?B†OJx@8g|\ù¹„0‹@|…’Ý- ­“<ô÷bv ½!“–$,< œ;6¦ÛGF´`þ§(Ìr…A!€dvËB¬K×;¯ÎôœäH`V±ž³Ö®XÄ- ¼§Ç­ÛþÏÿ¡8"  >‚BÉjÞ­ZqF  OŽê_,ÐÕÑ+ú¨û¨¾PöG³ÂG®÷Íçø‡(Hb„Å€c…ÛÝê;¼Î7ýÂ)Iªx¨„˜4÷ésÚôü®”ÙŸÚšbIRW·/øÿ®®à÷kk9Øn™ÛnåÊïüãÍŸéù£YaàüYs¨ñܱ1eÓI8!BH?t@|tH6·4$̶t`yܧ˜å³‚µ‹0Ûï×ܱ1ŠGßù!¤)‚BéÂöMA!€d2ïV®|þè¨ùýº”“#)xŸ·¾ü0GI­çÄ濳j‚BÉà–‡„¹>ŸÎhlî\  jàüY-þìRŽ ’ÒGÝÖ¡Æt&Š€@:qôèÕÆC" A!ÄGP駇é¤ÇÃÉ´·iÓ¦[vUU•ª««%Ýâ{¬ÝX'~ó+Ž>’Òè•€<îSæ×Ö‰wjÞÿ¹~ŸFBéÆ{v mö53croæ…÷( E@̼Ç_ÿ]>6è3ç/èb^ðBx¦ç¤F¯´àö Ž’Š5ÀÎ hþè(E‰!wÀ'v¶$©ÀÛG@ }¯‡Ùó•—=?%÷­ðs*/Ëôóè(€ø¢uöõõéûßÿ>Å0í–ßד"$Ìô©ÑQý낺:zE'~ó+=T¹Š#…¤1z% ï_ ?sáE‰cîØ˜îuÿŽBH{¶ÿUóß/]U[ǹYO^Î|••f+óöyIµÿµ5Åf7¡$ýñLû ÀÌJš«`®Ï§sK>G7!’’µ‹pɹO(…hDÝ¿í›òó³eȶ¼0mjÕ´·Kž>Ÿ\§¼²——ªò+÷Ê^Qʉ„´BP8±‘+ÿ¦¼öÏiÏ翪VgÿŒ-ÿ‘/FmYÒ× ÀLJª?•ÐMˆdôQ÷Qº¸Nyµbíö)?¿²¼DηëS¾N´ê‡ò˜ßsvºÕ¾w 'ÒAa|ÞO¸¯/Àk-A!€™’T!!Ý„HFüê ùoºÜ(Ƕ63 ,*ÈSqapâ.ãÿ@:"(Lð³rö|UظVÓÅç¿2œ7Ù•—å™ÛKP`&ÌK¶ ¢›Éä£î£”D!0{E©ÆÏ¾õgŽm­jØÞ&I1“.öê–$•ÝW ×?ý 'p AáÄò²ç30z<ó*$ Ÿõ˜ Àt»-Ù6(×çÓ§FG%Éì&nºL7£‹°æ1ÅÂAaÁg¯ÿ¸éNuuSP0(,/»ÞQl…0nKƲ†1'Þÿ•F¯p/Ü|G;š]„sÇÆ”í÷SfA!ÄGP`¦ÌKÆ ¿7á{‡ZôÈןàhᦠ™ÑøsçÎiîØ…n’k·I’v8ÖI’Ö|ûUy¼>æ©v]…¶n©2ëñèå7Þ•³Ó-×)¯ùýâÂ<ÙËK´uK•Š Ç]GqAžv´©±¹Ëìô³-/TÝÓ+µ~myÔmôèå7ÞUËWÈzí%ªYeÓúµåÊÉΔœÔe³£9äùM{»Ôñë`è±~m¹j×UÄݧœìLÙËKôÜ·‰: ²uí{ëÕØÜ©ÍŽ½òȶ¼P[·T©æ1[Ä~¿üÆ»jlî”Çë3÷Ûx¬±Ÿ›Íj9ØÒ¹uKUÜÙ¨›öv©±¹SÎNwHmj×UĬédŽ;RC >†˜ ó’uÊ<½ú¸4ø!°çÄ*-{HK‹îáˆá¦x÷=æŒÆ#}æüŠÜDF° ¾öšá”ÇëS{g½ü“êÛÚu¯OÞ.56wißÏD ï5Öáìt«aG[ÈlÃÆºkëµóõÃ:~èûaËЊµÛÍ`-|¹ÎN·›»Ô¾w‹r²3å ËŒí3ž_ù•ë¡GË—6liŠØÿЈZ¸ÔrÀ¥ÚuåÚ½£6âçÆ:Z¸´asSȾ8»zTó˜Í|ŒÇëÓ†ÍM!§ñØ5ß~U»w¬—my¡V¬Ý±-Æv?ôýˆ Ð?4¢k·G,7´6Ú÷“gÌu¢câñútüäxa¤‚Bˆ/ZPø·û·ú›¿ážÇ¦æ¶dݰ¬Ë—µÈ2¼³ý·8Z¸)ÎôœTïióëeEn# ¬,/ÑsO=¢²û T».Ø…ÖrÀe„e÷hßÏè̯¨ÁßîPûÞ-!Ýj¶4ÅYG³üC#Z¿¶\í{·¨}ïíÞ±^EÁa<®S^56w†e/ÊÐîëuæ×?ÔøÙ×tüÐ÷ÍÓuÊknŸmy¡¹lƒu}Æ>hÄ\öàowhüìkÚ÷Æ3ª,#Í]Ú°¹1æ>û»zU™Ö¯-WQAžêžZñ×)oÈvìp¬Sö¢ ³ök¾ýªÆÇǵuK•ù˜çžzÄ\†c[kÈ2ÃÂçžzDí{·hüìk:óëšµqvºã# \½ªLÏ=õˆŠ ò¸‡cšbè1Ä>ô¸¯¯OÀ”ÌKæ+òôê·_\¨±¹s5<4¨£õPå*ŽfÌè•€Þ;Ôb~½øüe¸'&p«ø‡F´Ã±NuO¯Œø™1[rQAžœoׇt¥Ù+JÍ!¹M{ƒCˆ]§¼1‡Ç¶ïÝ1„×^^¢»¾ò_%C9ëp`£ãmgúïÛ–ʶ¼P®S^í?Ør»³68T8lùÅ…yß[óíW%IÙ‹2ä|»>d{k³ÿûËW´ÿ`·›»´~myÔ¡Çþ¡‘¨]~áÙº¥JŽúêºù/¨a{0¤‹¶{E©<^ŸöìÖþƒÝ!ËÜù“Ãf@~ÜŠ ËQ_­œE™ÁáË\jlî ©_¬ã¾³áq^ iŒŽBˆ/¼£°««+øýÚZŠ`RnKæ›;6¦Ï;g~ýÁ‘CæDÀL8Úq d²’%–óÀÍWT5 ”¤Êò{UY^¢º§VF [5?4¯»ÞM>döúrJ¢mÅ…‹Íν¡K¨ËñEÿ#‚£¾ZûÞx&¤sp"ÎÎsøqÝS+c|–aÆ;r8êcV¯*‹JÁ Òìå×C—õkË£.Çú=k=^~ã]³¦±Ž[ÝÓ+Í.Í–®Iw¤':  ¾ðŽÂ®®.: LÚmɾŸ9A#× {÷=5̈ógõáÑÿe~½ÌÓËd%À-f[^óg;—óíú¸a’§Ï7á:¬¡X¸œkCo­÷×ËÉÎ4C®ÍŽfmØÜ¨Ž.wØvªæ1[Ôð1§eÖp3b›²3µzU™$©ã׿‹±O¥ Ô6zˆX\˜õß¡u¹Êµqòša¼šJRÍcÁíïDLä˜ }@|Ñ‚Âññq  aófÃF.óxÔsß¿“$õ÷žÖGÝGõ…²‡8z˜6£Wzwÿõû^~zxXÙ–{b¸5&ê†3¸NyÕÛç“ëTŸü—‚C‹Ã' ™êú£YŽú*sbÆæàä(ÆìÃ5Ù´úѲ˜ÝñöÁm6æhÛ«;2^¸:‘‰ÖkùÖmÙ°Ûœ¹9ë„/ï@Ä:c…“@´¡ÇÍû;e»¿H™ (€´>ôxΜ9ל9s(€ ÍŠ0#Ðâó4ðÙ;%Ií­oiñçòµø³K9‚˜ïj‘ïB¿¤à0ã¢^fÑfƒ–.mv4GeX NhÒýÛ¾éÿ¾®BÅyªÛÚl.ß:û°¼‡àÇÚ„B7Iò_Ò\v_ÁM©Ýtwë¹NõYþíMøy¯/áÑ®\UàÊ¿€ñ9%FP™7[6tɹsú—… È 19м[kŸ®×‚Û38Џ!uUω̯—zû4t”ÂI®në/ôòOÞ5¿.»¯@¶å…Á AÊKÌ DV¬Ý>#ë·W”ÊõO#w@-ºåìê é:l9à’³Ë­ãÿë¤B°™5o†œìëïÇ;ëîfL´[¤‘+ÿÒE(Ië·+ïŽ,Šà†¸{‡µ©áXÊìO´ pÓ©'ìv6€˜fMH8wlLwýþ÷êùw_0g;no}K­ÝÀQÄ” œ?2›q®Ï§\ŸÂIÎã0²û ÔòÓgnY7ZqábÕ=½Ò¼7bË—ÛZÕýÛ¾kÝ…Ý MÂa[^qoÃØûŸ|שâ‚Ð{Næ~Œ@"b„Ìp Ñ……GÝÁÏ…b¹m6mìüÑQ-óôš_Ÿé9©ïá(bJF¯t y·®Ž^‘$eŒ´ÔÛGa€Y ±¹Ëü·£¾:f@h;]\§¼jØÞ¦5ß~5êÏk³Éùv½ùuËAWB˵vÔÅšõ×`Üïïf Mžîíßùúa5loSÃö6Nf$„€@º³NH2á“™u»µÇ餠¢š7Û68Ûï¹?á{‡Z”÷Ù%ZZtG“ÒÞú–†‡%;U—y<Ìf ÌBÆ ÄáüC#zùÃÓ¾>×)¯ÛZ%ðšÇl‘Ûd™´$Ña·5«lª[Ô¬¡K5lo‹º\)°„µë*’ç8dgª²¼D]n5ííRÝÓ+£%ö¨aG›üC#ª,/ÑÖ-UœÄˆ‹€ÀÍPR´PÏoHÍ.x: $ê¶Ù¸Ñ}}ʹþAñ@ón^ p4‘°ïÑ™ž“æ×Ë<½Êp³…uÒ—ßx7âçï€V¬Ý2,טäFÕ¬²)ûZ0¹aK“œ=!?÷hÃæÆÇ'"';SuO‡%»NyµæÛ¯FÌ^üòOk³£YR°‹°v]yR— 
ë̯X»=¢£Ð8.Æ~91B˜tHļٺá÷¸Ýúíïרܹº:zEûÿá­þÏÏ2‘ &ôQ÷Ñû.>AÙ~?…f{E©ÙµÖrÀ¥»¾ò‚ìå%ÊÉΔë”WÎÎà_ÈŸ{êóÞ…®SÞ˜Ýy“‘“©Æµfˆ·bívÙ–šw¸Nõ™!Øúµå“º7Ÿ£¾Z¯OM{»Ì™’íÁ0Äãõ™¡gö¢ 5î¬ éXL¶å…Ú½c½6ln’hDk¾ýªŠ óT\ü¥Ä8.’´{Çzî[ˆ¸`zÑQ`"·ÍÖ Ÿ;6¦»>>m~í»Ð¯ýÿð Gq œ?«öַ̯3F*èã>„ÀlÔòÆ3ª,¿ 56wiçë‡åìt«¨ OûÞxF;7Ó´·kÚÖ]ó˜Mí{·¨èÚdF0éìtË?4¢ìEÚáX§Æµ“^vãÎZíÞ±ÞìV4–ëñú”½(Cë×–Ëóë“vVàÚujß»%äØû É<6É4Tɇ€fè}šŽBq̛͟uù²–yzõ‡â"IÁ ðÝwö葯?Á‘E„ógC‚䌑€îq») ¢j>àUæís§é—]îu騯–£¾:áÇŸ}mÂÇädgÊùv½<Þy¼>¹Nõɶ¼àZçÚõ‰L¬“ˆLvñ¶Û^Q*Ïo^”ë”Wþ¡sý9Ù™q¼DÖ[»®Bµë*Ìe{ú|*.Ȼֱ˜s{YötB˜YÑ: û/^TýúOHsófûäú‚6#(ì9ñ$"„3ÏÓ=n7• &ë/§HnÅ…‹U\¸ø– ]5B¯™X²v ΖcƒÙ‡€n#(ìt hΜ9:;0 ¾õ–þë7¾Aq€4v[*ìD®Ïg†…R0(|÷=]H’†‡#ÂBD*½;Ÿ"À-B@7WmM±æÌ™c~í»tI?|ë- ¤±y©²#Ë<½’¤‹yÁû+ôœø@‹?W ¾ü0G9^ èñÓˆ€™ŒMõª?UÅ—¿ ‹—¦u¹Þ³>í}§‹@ „Œ ŽB =ÍK¥  l ÓÓè•€öÿÃ+ò]è—D@ˆÄäÝ‘¥¼;²(Ü$„\ ôu[ªíÐ2O¯>=ú÷ʹþaÓ¸Gáè¢t`LR2<4h~o™§7侕àÖ" €äR[SLP¤¹y©¸SsÇÆTúÏÿ¬?…Ü£pà“~­þÏÏjÁíùõQ÷Q½w¨%ä„w}|ZY—/SÀM×Õí“Û3œ’ûfûww¨¼,W™·Oþã$!$'cÖã®î`ƒCô2/•w.ü…¾ ýÚÿ¯èkÿ¥fßÁÑO1uU{ëõ¿tqBÀ­æó_•Ï5%÷ÍÝüÜÊ礞G@É H_·¥ú.óôša¡ ›w½¤óg9ú)ä½C-!aÆH@¥ÿü!à¦+\º8möu$ðo“{<!Ì =ÒÓ¼tØIã^t(.’$]½¢ýÿðоúh¾PögÁ,6z% öÖ·t¦ç¤ù½Œ‘€îq»5wlŒn:ÛýÅúþ–ÿ[#єܿ®£nu}àžôó`v‰ÖQøÒÛoëù¿ø Ф¨yé²£¹>Ÿ2FFôqi‰ÆæÎÕÕÑ+jo}Ký½ë«ÖpŸÂYèlïÇjç­ Jr}>-õön©Âü¼”Ý7÷Çý“~!ÌNáAaÿÅ‹Úãtê »â)h^:ílF  {zÜúCq±™ÁPИÐä‘ÕßÐâÏ.匘%>8rHG;†|oñù *èë£8$B˜Ýģî`79A!zæ¥Ûg‚CQÏ„Lh²÷õíúê£5zàËsV$±á¡A½ûÎõ÷ž6¿7wlLË<½Êöû)I„€RC¬ @j™—Ž;m„JY×u¶°@csçJ N~Ñßû±VTƒáÇIèLÏI½ûÎ]½b~ïÓÃÃúüéß3¼€$C@©%ZP8>>®9sæP EÌKçÏõù”uù²Î|þóæðã3='uÖóý‡UkTúÀŸr†$Ñ+½w¨E='>ùþgûÏiɹs€$C@©)<(œ3gA!Bæ¥{措ôŸÿY}øì’‚³¿ûÎ}Ôý¾¾úèjîUx xÿˆŽvéüÔè¨>ú÷Ê(I†€R[¬ Àì7ôõ)Çï×™{î6‡÷÷žÖÞ×·ë/?¬?ý³U A¾‰Îö~¬÷î—ïBè Š‹ü~yz^ @" jZý:üë Œòû‡U´ YÙÐ"ëòeÝ÷áI]¸óNÏ_b~ÿÄûGôQ÷Q† ßÃCƒzïP‹Îôœ ùþ§FGUäéUÖåË €$D@ ÆÆ¬Ç@ê!$ 3wlLKÎSÞÅ‹ê-Z¦Y¸PRèä?ý³Gµ´èŠ5F¯ôáÑ#êþuGÈÐâ¹ccZ|þ÷ ‰HUé–—åMéy…@j!$Œaþè¨îuÿNC99ê+Xª]°@Rpò;ÿÏ«Ê/º[|ùÏtWéýëÄ ¥àÄ2K½} - ‰H¯m}"DAP¤B dûýÊÖ…;ïÔÀgï ¹_aïi-̾CU>Æ0äIÔÑŽ3KRÆH@K½^†äF„‚B U&À:ù“%ŸÓÅ¼ë­ØÃCƒz÷=:Úq@U>¦â’åLpG¼pðS££Zrîåú| €YàÝ÷ÿò5!¤/‚B`ö#$œ„ù££ZæéÕRo_Dg¡Î_p»¾PöJËÒâÏ.¥h )ö¸Oé£î÷Õß{:âçŸÖþ¨l¿Ÿb0K Ùp ŒÎÂ;/\ˆ ¯Ž^щ÷èÄûG”wg¾¾`û²î*½_ ³ïH»:é9©3=êLÏɈû JÁppIÿ9†#­ôh[cÏ´.sä ÷ípk …ÀìEHxŒ°pɹsº˜—§sK>gNp"I¾ ýzïP‹Þ;Ô¢ü¢»õ…²/§üpäógÕÓ}Tu JÁ I>sþ‚2N"¤Àè˜Ü½ãR! 
AabŽ;¦#ÇŽ%üøJKõÅ{ïUQ~~JÖà…9)n1BÂi’ëó)×çÓPNކr²5”“cvJ×':‘¤»JïW~Ñ=Ê/úü¬’Ö™ž“Œú¸Œ‘€r/ú”íÒüÑQN¤•Âü_+W)€Y¯âË_Ðá#'¸r•€@Êý7¹{‡)Ä4"(œØ‘cÇôâ®]“~Þw¿ùMýhË–”«!á­GH8Ͳý~eûý›Û§¡œùs²u)''ä1Áa¸'%IóÜ®¥Å÷ÌšÐÐ žõœ–ïBÌÇ~jtTÙþ!åù|t "­ef,ÐÎÔªçtÿŒ,ß{Ö§½ïtQh7EÞYú›ú¿ÐH`T…ùy@Jèû$ mn 1Í ÷Å’å,\óç½ýýúùs’¤¿ÿùÏ54<¬oÝJá0­ gÈܱ1³»plî\]\œ§‹¹y d†5¾:z%jh¸øsK•¿ìnÍ¿ýö[Ž^ Èw¾_ÃCƒø¤oÂPÐØï`P:ÄD$@˜Ò»ó)€”wG–òîÈ¢€ &æïêëõðƒÆ}Ì‘cÇôúz ]¾¬Ÿµ¶ê¿<ýô¬züdUÕ„û›‡ð&˜;6¦Ïœ¿ Ïœ¿ « h('[ÃYYú—… C†$K‘¡¡aaöZ˜“«ÅŸÍׂŒLå/»;øýœÜšÅ=<4¨á¡‹øä¬Ù-˜¨Œ‘€>=<¬…—/ ˜u2nŸ¯Â¥‘ÝÑ¿ûý'gN‡|P/lܨïmß.Iz³­mÖÑ-ÊÏO©{,Îv„„7ÙüÑQ30”¤@F†./Z344C¼Á„»ù n×âÏ…vŽv&üj ³†‡5wŒ™U¸9K7%ô8{E‰Š ò´z•M5Ùfl;¶n©’£¾zRÏulkUÃö6IÒøÙ×Ìï;;{´bmðøö½[d¯(½¥Û‰Ä.ÍÓóÏ~=âû›žßEq¦ Aáô¨²ÛÍp¢IO¬C”%MºcϺüeK–$æèéÑÐåàdÙYYz ´ô–Ölªû^Ãtèx$$¼Å2efhxuÁ]ÎÊÒèüùº¼0KWçÏ™19WG¯Lª0žO_ 3FÊ pÓ8;ƒ!Bcs—ì%Ú÷“g”“Ia€YŠ ðÆeǹo¡Á˜ $Zˆø­êjý-[âÞÿðÅ]»ôf[›zûCŒNÆXaÙ‹»vé•={ä¨(?_ÿcËUG9Î/îÚeN\rùƒ$Iߨ¯W[G‡r.T_{{Ìíìíï×ò¯ÃýmÙ¢ï~ó›æÏüÃÃúëíÛõ³ÖÖˆçÅÛ¯mÚ¤#ÇŽé…µlÉ}§¡!d?Þ{ó͸µ›í “ÌüÑQå³ÿ^üÈÈÐØÜ¹¾ÖmhÜÛp*!¢Õ§¯½x pÁÕ«š?:ªŒ@€03ª¨ OµëÊcþÜãõ©å KC—rvºU·µY;k)0‹Þ˜Wöì1ÿ-äúYkkH°etÎq?kmÕ ·[{þçÿŒè¨óë?~ç;:ÑÓ#éz xXºÝ:r옾¶i“ö¼ôRHàþ<ë¶9vL½ýýzâùçõ­êê„&[y²ºZmò«ÕéŒ.JR›Óiþ»Êò˜==zâ¯þ*$ä|øÁ#öãÇ[·ê[ÕÑ»³{ûûCj-I‹²²R: ” g cvà¬k-»ñŒÍ«@Fè)sÇÆ˜aI¥¸0oÂá³®S^Ùÿb›†.Ô´·KuO¯”myaÒ¢4dø1€H…SóÊž=fלôÃêDO.[²D¯9!Ab«Ó©ï8fˆÖùæ›!ÏÿÞ¶mfÐ÷ÂÆ!÷;ìíï×7ž^ºÝz¦¡A?ø ˜ýõöíæóž}â ½°q£ù3ÿð°¾·m›ÞlkÓÏZ[õþäObs†j»]ÙYYº|YmqBÂ7ùKIRUe¥xú‡‡õÿößÔÛ߯ì¬,½°qcH‡¡u?þzûv=PRu8ô›mmf~ðAèéI¨‹s¶#$LAsÇÆ €<žúº IDATdg[^¨ ë´as“$©å€+©CB‰‰Þ‘•¥ÇþôOÓ²o¶µEü¡ÛmvÀY‡ñþxëÖˆN@ã^…ÙYYúÇ×^‹øyµÝ.9zâùçu¢§G?km5»==f0f}VEùùzmëVU<ù¤üÃÃz³µUßýæ7ÍåHÁÐòïêëCž—³p¡^s8ô‡sçtäØ1ý÷×_Ÿ0$”‚Ý„¯ìÙ£7ÛÚô£úúˆ¾==f0ií"ls:C‚Nk@hìÇ?¾öš–WWË?<¬¿ß³G¯9Q·Á:„9]f`&$ÔŠ ®Ï:ëñú,ÿPoßEIReyIÔçú‡FÔýÛ¾à/‡¹*.\s=ÎÎí?Ô-×)¯lË e[^¨Õ–Mê>ˆÖõ•ÝWõ¹ûºäìrËuÊ«œE™×ÖU Õ«›œÅãPÓÞ_ËuÊ+)¤®_û•¸ûfl›±®S^ä÷qUYÌçvt¹CöååŸVËA—ìå¥Z½ªŒÀ7dåWî4CBIú¸¿?mkíÞyÑ,[²D?ª¯è®ëíï7CÆ'««cNÎQm·ë‹%%úÐíÖ›mmf`×ÖÑa>&֌ɔ–êɪ*åçkÙµåÿìZ°(I? 
­ž}â sèñ‘cÇ& ݾUUe÷ms:#‚Ec½ÙYY!?{å­·$I_,)‰ 9 NBJŠùüTFH`VjlîŠ:ð•ë”לm8ÞìÀ67ª±¹ËüÚ˜4¥¸0OûÞx&á0̺¾ðÙ]§¼ZóíWC‚N)Ø™èº6;šµóõÃÏwlk•£¾Z[·TŨU§6;öÊ?4ö“.ÕmmVÝÓ+µÃ±.âyö¿ØfîKÓÞ.³FÎN·v¼þÿÉóë™LSâýdDÛÝæ×·ÏŸ¯5_ýjÚÖã‹%%A•µ³ðɪ*}÷‰'bÎl}lõö‹–,чn·>t»#žm;¬Â»îŒeLôýÚ¦M‰]—Âf!–4é‰9ŒpñC·[YÓ£âÂÅòx891%„‰+ÊÏמ—^Ò×6mÒ‡n·þþç?×ÐðpB3OVïMêíÞ‹ÑTÙíÒµ‰XÚœNsø¯1«ñ²%KBBÓ¡)ÎÍð¡Û=a÷eºH($ü—…Y:?g Õ0#®ÎŸOHCC—æ=ïÂyú|rvö¨å`·®=÷Ô#Þwo*²eD„gRp¦bÇ–jmv4ËuÊ+ggOÈðáɰ1ŽÖ‘Xó˜Më×–K’r²3bngx@h,oõª2í?Ø­ý»C~æØÖvý¹?}&jç_ãÎZ9»Üêíóéå7G %™¡¤9H}„“—³p¡Þzé%U|ó›º|Y?kmÕÿUY7ÔêkoŸtG`Q~~H7âdUUVê­mÛ¦}ߟ¬ª vþò—úî7¿)ÿð°9ÔØÚE(ïOhŸ‰I($¼¼p¡.§ÁTÏnññqŠ@ròš÷¼›ÈêUerl©ž‘íˆ7l¶v]¹6;š%IÎ.÷”CœE×—¿as£v8ÖE¬³qgmÜeØËKbn§myaD@(Iûu›ÏìÕ®+WÃö69;ÝòD¬§¨ `7„€pêŠòóõãk³KÒ3 z ¤$dr’/ZîÁw䨱)wÆM¶:úÐíVQ~¾ž¬ªÒÃ>¨#ÇŽéŸ|2#û^e·ëͶ6èé1'=1†?vŸBkWaoO‚s#n‹õƒÌLî-àÖÉ^”¡Õ«Ê´ïgÔòÓggìþwñî5˜“©¢k³+;ct=&¢v]¹²;›»tÇ}›õ¥G †ímæ,Å7²Ñ¸NyÍ.̡ံ·ÅüϘ‘Ùx^¸âB‚Lá«¶ÛUUY))x½¿Þ¾=äçÖ‰:Œá¸±,ÿú×Uñä“Úd™„ÄXvoÜ€í¿¿þº^ܵ˜Ù'/–#ÇŽ©`Å }mÓ&µN°}áû½lÉs¿Œ}«ª¬ŒÚ-iÜ×ñ—qï5øâ®]Zþõ¯ëk›6(ZÄì$\¹r¥òòòäõz©€ÕÙÙIHC•å%r¾]˷ö¼ îÏ‹ óÔÛ绡uädgÊùv½jþòUsY®S^¹NyåØÖªœìLÕ¬*ÓÖ-UÓÖ±gÉØÙé6glžŠâÂL áôùQ}½Ž;¦¡Ë—ÕêtªÕé4;‹òóUUY©¶Ž½ÙÖ¦oUWG¡÷ïþs33‚A)ر÷½kÁã_oß®=/½ñÜV§ÓœÄxîwŸxœ%ø‰¿ú+ý¿?þqDxçÖ÷¶o—xXGŽ‹ºìx¾U]­wíÒ›¿ü¥¹þð.BÓUUf·á_oßõþ'zzôÊž=òkQVVHGfº‹;ÜØf³Éf³Q%3ª§§GcF)ÚlË åùÍ‹j9à þwÐ¥¡Kà/Ñ×&,ilîÒîëU»®bZ×]v_A˜ÑG'!¦‚€pzåçë…Í0=üàƒf(÷ÂÆfˆøµM›ôÂÆªª¬Ô¥¥òë•={ôâ®]’‚“~Ø‘ý@i©þñµ×ôµM›4tù²^ÜµË ­²³²ôÖK/E„u/lܨÞþ~½ÙÖfv*&òÜ¿«¯×Ðð°ÞlkSo¿¾Q½Cüɪ*s['£(?ß¼÷¡»‹Ð`úȱcúÚ¦MQ÷ã­[™Õ8 !!€”å¿Ö©§o öó‡FÌ™‰'{OÀðåtÿ¶OE¹!ÉmË e[^¨º§Wªæ/_Ñþk³9ßÈLÊëó­³+G­w@½}#¶˜ ÂÄ,[²Äìp³ÎÌ;‘mÙbv~èv«·¿ß2û@i©Nµ¶ê•={ô³ÖÖ‰H–-Y¢*»]/lܳ›ï5‡ÃÞk„rÆs¿U]³ÐxÞßÿüçf7£¡ª²ROVWG ä¬5ˆÇÚõø­ªª¸ÍY¸P]?ÿ¹~ÖÚjv²³²Te·ÇŽmÜcÑ:L:!$0ëE›‘W’Z¸&|îþƒÝÚÙðxÔŸíüÉaóßF÷ßTØýzû|qïÃX»®"ê Å7bõª2í?Ø­¦½]rÔǾßá†-Mæ= »cÆ&‰Aê# LÜ·ª«õ­êÉwøƒ±ä,\¨6nœòPÚ‡|0îòã=/‘Ào*5¨¶Û'Ýõ7•úN¥Ó1•ÜÆËÀld½OÞfGsÄÏ›;Õ´·kÂåx¼>mØÜñý–.½üÆ»’‚“¬ÜH'aÍce’¤Ž.wÌàòå7‚dö¢ŒZ—UÝS+ͯùöåñD­“®_[N@ˆ)# f7:  …ôh[c…¦ÉÈ•1Š$±šU6Õ-jÖÐ¥€›»äéóÉ^bëìꑳӭ²û‚3÷û‹¦¨ Ï|~Í*[p&âÎ56ÆìEjܱþ†¶Õ±¥ZÍ]ºКo¿ªÚuå*.\,Ûò¹Nõ©±¹Ó\÷ÔÊi êì¥zî©GôòOÞ•ë”W_ZõCÕ®+—my¡üC#j9è2²û ´³a'¦„€˜ý  …FÇäî½L!i!';Sηëeÿ‹mº³Óm^R0ôjùé3ªÝÜw9;ÖiçOG<_ ˆ-?}æ†ïÓglkÍ_¾ªÞ>Ÿ@†Ûº¥jÚ'ÙÙ𸊠˱½Uþ¡í|ýpÄcÊî+óízº1%„@j $€Y®0?O·ÏWàÊUŠÌ /}ñóa–»xéªÚ:ÎMë2ü£v ¶n ÞtÞ:\xªlË åùõ‹j9èRË—ü—*.È“½¢Äœ!¸v]¹ìå%²——DÝÛò`@ÖØÜsáìå%Ò–È›çæÅÜ?ÛòBy~ó¢›;å:å•ëT°»1gQ†ì奪y¬,ji,/|ûÙCÝÓ+U»®\Í]rvõ˜ºØ–\[·-·îtáîNÙ}+øl†2oŸZ<@@¤Ž9ãããã”À­ôÒK/IcÃzþÙ¯SŒÐsºŸ"3dqî"åÝ‘E!féµqû«m7e]÷.ËÒóJ§wû=ÃÚÞäž±åi}}°¾¾>ÿ¹¨ŸE7=¿+-j‘—3_/>÷ÅI?o¦Â-»®×û'á0ñëëµkÕÐI)¢ôî|Šaç.¢¦,7'Ký©+Ÿò#Rè R!!RVÞYz¦öQyÏÌÌ/Ö‡ÕuìwHQ×T¨óhF7~k÷ésÊËËS^…h¿ûÝÔ®_á¡$B  ¥Ùî/–íþâYvÏé~BB€ëGB6=¿Kª®®NšýÛ´iÓ¤Ÿ- ”D@¤€Û(˜H¬€@j $q…„H1„„ ¦ha}}=…R !!ˆ*V@XXXHq€CH"é…„ Ò!!0é‰H" Ò!! Ò!!iŽ€!!iŽ€!!iŽ€!! 
For example, suppose that ``ceilometer-polling`` has process id ``8675``, and
was run with ``2>/var/log/ceilometer/ceilometer-polling.log``. Then,
``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to
``/var/log/ceilometer/ceilometer-polling.log``.

Structure of a GMR
------------------

The *GMR* is designed to be extensible; any particular executable may add its
own sections. However, the base *GMR* consists of several sections:

Package
    Shows information about the package to which this process belongs,
    including version information

Threads
    Shows stack traces and thread ids for each of the threads within this
    process

Green Threads
    Shows stack traces for each of the green threads within this process
    (green threads don't have thread ids)

Configuration
    Lists all the configuration options currently accessible via the CONF
    object for the current process

Adding Support for GMRs to New Executables
------------------------------------------

Adding support for a *GMR* to a given executable is fairly easy.

First import the module (now provided by the ``oslo.reports`` library), as
well as the Ceilometer version module:

.. code-block:: python

    from oslo_reports import guru_meditation_report as gmr

    from ceilometer import version

Then, register any additional sections (optional):

.. code-block:: python

    gmr.TextGuruMeditation.register_section('Some Special Section',
                                            some_section_generator)

Finally (under main), before running the "main loop" of the executable
(usually ``service.server(server)`` or something similar), register the *GMR*
hook:

.. code-block:: python

    gmr.TextGuruMeditation.setup_autorun(version)

Extending the GMR
-----------------

As mentioned above, additional sections can be added to the GMR for a
particular executable.
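For instance, a custom section could be wired in roughly as shown below. This
is only a sketch: the ``pipeline_stats_generator`` function, its section
title, and the data it reports are hypothetical, and the model/view imports
follow the oslo.reports convention that a section generator is a no-argument
callable returning a report model.

.. code-block:: python

    from oslo_reports import guru_meditation_report as gmr
    from oslo_reports.models import with_default_views as mwdv
    from oslo_reports.views.text import generic as text_views

    from ceilometer import version


    def pipeline_stats_generator():
        # A section generator is a no-argument callable that returns a
        # report model. The key/value data here is made up for the
        # example; a real generator would collect it from the running
        # process.
        stats = {'pollsters_loaded': 12, 'last_poll_duration_sec': 1.3}
        return mwdv.ModelWithDefaultViews(
            stats, text_view=text_views.KeyValueView())


    def main():
        # Register the extra section, then install the GMR hook before
        # entering the executable's main loop.
        gmr.TextGuruMeditation.register_section('Pipeline Stats',
                                                pipeline_stats_generator)
        gmr.TextGuruMeditation.setup_autorun(version)
        # ... start the usual main loop here, e.g. service.server(server)

Once registered, the extra section appears as an additional block in the text
report the next time the process receives the ``USR1`` signal.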
For more information, see the inline documentation about oslo.reports:
`oslo.reports <https://docs.openstack.org/oslo.reports/latest/>`_

ceilometer-6.1.5/doc/source/5-multi-publish.png [binary PNG image data omitted]
ceilometer-6.1.5/doc/source/ceilo-arch.png [binary PNG image data omitted]
6ðÚk¯áêêZn›¸¸¸f}žëZÿ¦Öœ“k"""¶®ÿþ ýôS>Ì×_ÍÉ“')(( S§NÜ}÷ÝŒ=ºÂgìØ±€—^z‰:T¨çÔ©Séܹ3$%%•­[½z5~øaÙÿ›æ…™?>¡¡¡>|˜U«VqâÄ ÒÓÓ1xyyÑ·o_¦M›F§NÌÖ9&&†åË—sâÄ ®\¹‚¯¯/cÆŒaÊ”)ÎCe>üðCV¯^MXXÿøÇ?ªÜ¯¶õ¯í¹ÊÍÍåž{î);W7ndéÒ¥$$$àääD~~>ß|ó >';;›ûÑÈ×_Í}÷ݘÑSÓ¶´´×ï³|ùrŽ;Fff&xyyÊ„ šeâSDDÄf’cÅŠ|ñÅ<øàƒ888ðñÇ“˜˜Xî³rssyï½÷7n_~ù%‘‘‘|ôÑGeó­ZµªÂgðÞ{ï‘——gq;M˜0¡Üè‘5kÖ°fÍBCCÙ°aÎÎÎ<ôÐC|ùå—¬\¹’eË–1þ|‚ƒƒÉÏÏgáÂ…åÊ‹çÓO?ÅÁÁ§žzŠ%K–ðã?2oÞ<‚‚‚ˆ‹‹ãÛo¿­ò˜–.]Ê’%KèÑ£ÿüç?-š?§¶õ¯sµpáBn»í6>ýôS"##ùᇸùæ›ؾ}»ÙÏ3-¿å–[*=¦š¶eMë`òå—_RRRÂÔ©SY¸p!+W®dÅŠ|òÉ'ÜsÏ=8;;³~ýz]TDDD”äi¦Ñ¬ýû÷·xŸn¸0? qQQáááLž<___\\\/{ÌæÚÄIdd$………Ìœ9“‡zˆ:àììLûöí¹÷Þ{™1cÅÅÅeI;;;®\¹BŸ>}xê©§ðóóÃÁÁÎ;óä“OpæÌ™ ŸõðÃãêêJTT3gÎäÝwßeóæÍ\¼x±Ömøú믳|ùr¦OŸŽŸŸNNN´jÕŠÐÐPþüç?püøñrû¬^½š’’¦M›ÆÄ‰iÓ¦ ®®®ôíÛ—¿üå/¸¸¸päÈ‘J?sݺu|úé§téÒ…W_}777‹Žµ®õ¯é¹²··/;W;vä™gž! ÜÜÜÇÙÙ™˜˜._¾\î³®\¹Âþýûqvvfذa•SMÛ²¦u0¹páBYb¨]»v899áììL`` =öË–-ãå—_ÖEEDDDI‘¦azMº¯¯¯Åû´m۶ܾ×3θÖàÁƒøõ×_+¬;pà·Þz«ÙòL£8:TaÝ]wÝUa™iþ›+W®TXÂÛo¿Í€¸|ù2ëÖ­ãÍ7ßdæÌ™<úè£,X°Àì1Ö–éXrssË-7ÕeĈöéÖ­Ë—/gÞ¼yfËܶmï¿ÿ>AAA¼þúëUÎsTßõ¯Ë¹šrrr0 äåå±gÏ C† ©×¶¬mºwï΂ X¾|9[·nåøñã?~œ%K–àååÅ<`vòh‘–"++‹´´4BCCü³”ä1£G$''³oß>‹“<111•¾.º   Â$À¦G§Ì=ZÔªU+®\¹Â÷ß_å[ŸZ@@3gΤ{÷îüóŸÿdýúõ%y>ûì3ÒÒÒhÛ¶-=ô½{÷¦M›68::âààP–D¹–««+999dgg×h4އ‡sæÌaþüù,]º”Öhõ¯Ë¹ªlä—³³3ááá¬_¿žÝ»wsÓM7±{÷nòóó3fLµ‚×´-ëRoooyäyä’’’Ø»w/Û¶mãСC¼÷Þ{2iÒ$]XDDD˜Ù»Š¨¨(¢££‰W ‰ˆH‹dJ»|ùr³£/®—ŸŸÏÊ•+*}Uô¹sç*,KOOÌæèС@…·Õ·””¶nÝZí[¥üöXOuLs¼¼úê«ÜrË-´k׎V­ZáääDjjªÙ}Úµk@BBBêðî»ï2|øpžxâ JJJxë­·*©!êßPçÊ4Ó¶mÛÊîÑ®]^•š¶e}Õ! €;3yóæñ§?ý (}•»ˆˆˆ4¼J“<Û·oW’GDDZ¬‘#G@JJ o¼ñF•²ARR]ºt©ôYëÍ›7WX¶{÷nà·Ék¯e‰²téR³åíÙ³‡G}”E‹Õ©®/¾ø"¯¿þ:K–,©r»£GUOF}í\;¦Ç§|||*l÷õ×_WØ OŸ>lذ¡Â>§OŸæ®»îâ™gž©°Î”$»í¶Û¸é¦›HOOçwÞi´ú7Ô¹êß¿?^^^ìÝ»—¼¼Û·ogñâÅ€ù7 M˜0¶mÛÆ¼yóHJJ¢°°ôôt"##yã7HJJ2û¶¬š0=z´xñbæÏŸÏáÇÉÎΦ¸¸˜Ë—/súôi/^Ìk¯½À¸qã*”aztèÈ‘#eÇcJ\-\¸¬¬, 8q⯼ò ÙÙÙeoß¾½,Ñ3~üxìííY¿~=‹/&33“¼¼<<Èܹs)(( wïÞUÖçü#íÛ·gçÎ,_¾¼QêßPçÊÁÁQ£F‘››ËÒ¥K¹rå #GŽ´h2ãš¶emë0oÞ<¦OŸÎçŸιsç((( ¸¸˜ääd¾øâ :wî¬‹ŠˆˆH#°3šyÅÆüù󉋋ÃÇǧì_`DDDZ¢“'OòÆo˜}ÔêZ;vä…^0;Ïøñãqttäé§Ÿ&""¢ÂúÞ½{3oÞ<³?Ü·lÙÂ[o½evbf(ôöÍ7ß,{•öøñãX³fM¥?üÍ­ÿòË/Y¼xq¥oÞ2=z4³gÏ®p¬üã9yòdÙÿ¯Y³†M›6ñÖ[oU(Ã××—ˆˆ¾øâ ~ùå—rûüðÃ|öÙg•¶óüùóËæ0ª¬>ÇgΜ9ØÛÛóÎ;ïЭ[·*ëU×ú7Ĺº¶.Ï<ó ­Zµ"77—ˆˆˆ ‰®ÊʪI[Ö¦P:BmÞ¼y•¾‰ÎÙÙ™W^y…~ýúé‚"""-Ò–-[HKKcâĉ >G&^©B÷îÝùøãÙ´iÑÑÑœ:uªl®zôèÁðáë]QTTĘ1c°··géÒ¥$&&âææFxx8<òH¥ûŽ5ŠŽ;²téR>??¿²²¦L™BçÎùñlj%//???FŒÁôéÓË%*Ó³gOfΜÉÂ… ™;w.ï¿ÿ~…I¯ë³þ y®zö쉿¿?çÏŸ§]»vÕŽdºVMÛ²6u¸é¦›ðññ!22’cÇŽ‘••EII ^^^ôë×)S¦X  Êr,iÛúªO}´m}Ô§¾ÚÖšb¥ªciÌXiNý°1cEý°eôCk‰õÚ·­5]³Õuï¤~¨{'õCÝ;©êÞ©¾úa›6mš6ÉÓªU+ZµjUaùĉ-*ØÛÛ»Òu7Ýt}úô©¶Œ.]ºTZŽÑh´èX|||êåXª*£ÿþfÛêz=zô¨²œÆ¬O}´m}Ô§¾ÚÖšb¥ªciÌXiNý°1cEý°eôCk‰õÚ·­5]³Õuï¤~¨{'õCÝ;©êÞ©!ûaC°3FDDDDDDDD¤YÓÛµDDDDDDDDl€’<""""""""6@IiöÒÒÒX¸p!999j i±4'4k DDD››K@@sæÌÁ`0¨aDDDDDDêQZZ „……©1¬˜FòH³eJðtëÖ§žzŠÂÂB^yåÔ8""""""õèÀ|ôÑGDEE©1¬˜’<Ò,]›à7n®®®L:ooo"""”è©'ÙÙÙ”””0`À-Z¤DÓãZÒì\Ÿà¹Þºuë8uêÏ>û,AAAj0‘ZÈÉÉ¡¸¸˜;wRTT@||<ûöíã‰'žÐ£[VȦ“<«V­"66¶Vû>ôÐCøøø(B¬Œ)ÁÓµkWn¿ýöJ·S¢GDDDD¤¼´´4ýÆ‘ýöš?>C‡ÅÛۻܺóçÏÓ·o_ ¤†²26ä™5kV­÷8q"“&MR„X‘˜˜.\Xm‚ÇdíÚµœ>}Z‰iñNœ8ÁÛo¿ÍСCyä‘GÔ R%S‚§]»v 0 ÒíèÛ·¯ÌŠ8¶„JÚ:Ѻ½³EÛ¦¼¢¨°BQQQ,Z´ˆaÆnÑ>·ß~;k×®%""B‰iÑZ·ǹطo€=R)K<IIIäççãèè¨G·¬D‹Hò´nïL»°Öm«$õ1%xÆGŸ>}j´¯)ÑóꫯòàƒZœ ±III:tˆN:áééÉÖ­[iÕª÷ÝwŸG*øæ›o,Jð˜:tˆ;vè÷–•pTˆ5«K‚ÇäöÛo'00E‹èÂ#""""-æ^ÚÅÅ…ÔÔÔ²eŒ9ƒÁ@QQŽŽúI(¿ÉÎΦoß¾ØÛ[þ"nÿ²·né÷VÓS«þRªk‚ÇÄ´¿.<""""Ò’î¥ @§NÊ­óðð`çÎ :T‰J<;wî¬Q‚ÇÄcú½ÕôԛŪ¿”ê#Ác¢Dˆˆˆˆ´¤{is s?ꆮ†k¡øöÛoéÓ§O­<&¦XËÏÏW£6!%yÄê,Y²„M›6Ї‡ eëüüüpuuµ¸¬¼¼¼ ÃSCCC•è›cQ‚Ç$99™M›6qâÄ MÆÜ];Ér]<&:u"??ŸC‡é­[MDI±*999lÚ´ €#GŽpäÈ‘rëkòv-(}ú©S§Ì®[´haaa 5¼ˆˆˆˆØwwwn¼ñFüýý-ÚÞ`00räH¶nÝ è­[-IMÞ¢USIII8;;¢ÆnDJòˆU1 ,X°Àìºùóç׸¼üü|&NœÈ¤I“Ô¸""""bÓ’’’ˆ‹‹³8ÁcbšŒY‰ž–åÃ?,K𤦦’––€““;vÄÉÉ©Öe?~œ;vpþüy½u«‘)É#"""""ÒŒÅÄÄàîîN\\\­Ë0%zòóóõÖ­â©§ž¢uëÖìÝ»—ÄÄDŽ;F›6mðó󳸬3gÎpÛm·áååÀþýûqqq¡sçÎÄÇÇ+ÉÓˆÔsEDDDDDš)Ó$Ë5yD«2zëVËÒ±cDz¿;–±cÇ0kÖ¬—ÃèÑ£ñööàùçŸW7{5ˆˆˆˆˆHósí[´êšà¹–é­[GU#·@o¼ñFFñˆuQ’GDDDDD¤™1%x:vìXï?È 
‰å½÷ÞãóÏ?Wc‹4#Jòˆˆˆˆˆˆ436làìÙ³dggÓ½{wºw³3çΫQY999\ºt©¬ 777vìØ”>º£i&ô¥ˆˆˆˆˆH3ó÷¿ÿÝìòcÇŽñ믿ҡC‹Ëºrå ‰‰‰L›6 €îÝ»3zôh5r uæÌ kôv­°°0|}}ÕxV@#yDDDDDDD€?þ˜ÌÌÌí3`À|||ÔxV@I $ˆˆˆˆˆˆˆˆ P’GDDDDDÄFôèу=zÔh777 ¤Æ“ZûÏþCBB‚ hâeiVŽ9Bbb¢ÅÛ§¦¦ªÑDDDD¤Å !33“ŒŒ ‹÷1 8P'tîÜggçísáÂrrrÔxV@Ii6&MšDlll÷ S㉈ˆˆˆˆX`Ö¬YìÚµK ÑL)É#ÍFHH!!!j34'ˆˆˆˆˆˆˆŠŠbÇŽ5Ú'++‹U«V©ñDl€’<""""""6"--´´´íSPPÀ¹sçÔxÀÆk<¿Îøñã RãY=®e¥""",ÚîÙgŸ­tÝ’%K,š¤xÚ´i•vÈ„„¾ûî»jË dúôé ZŸ¨¨(¢££«-cذa„‡‡×éXª«Oc¶m}Ô§º¶mÉÔO®>¶ÖO,mÛªêc±­ënó'k꫊këk}Y÷5[Äšlذ#F`0,ÞÇßß¿FÛ‹’<-Ž¥ ¯=Rù¬ùŽŸ!íÜ™jËøåh ~—Z›]—š˜bѱ¤].Ä£Šc©ú;–hQ9öžA\ò¨Û±TWŸÆlÛú¨¹¶½=ÔK 8÷+É §ÕO >¶ÐOjÓ¶××gHgw¼Ý­6¶uÝm^ñdM}µ)¾G×újè¶mªk¶ˆˆ’<6ÎÎ#в/ÚÃé•®+°÷ÂΣ°Ú2v'`Ÿi¾œ’ËK¦½W•ÇRõ)Êv±¨œSÙ.Ä×ñXª«Oc¶m}ÔÇ\Û*ÉSªÐÕ;õ“¨-ô“Ú´íõõéæëÒ$?,m]w›W}z¥ë—,YBbbbµåL›6   ³ëøî»ïª-cذa„‡‡×©>Ï>ûl¥ë¢¢¢ˆŽŽn”úÔWÛÖG}£m3Vªk[sõ ãÖ[o­°\I±Zy'NGlllµÛ^¼H–›[¥ëÏÄÄp&=½ÚrR7n¤¯¯ùu/Zt,¬*>Ë’2²"#+]—tâ„EåÔG}ê«më£>Ѷ+Õµ­¹2bcc•äpðêH«ÑÏñîônj ‘ÖjôsŠPð© IDATLÎÕ˜róô£]·¾ôq]ÞŠ]=<“n­¿¶w/cgÁÖ>þ´ïÞŸà¡·Ôwx½Æ±âFB=ÈÊʪÑ>nnn 4È*ë“@JZÅ”Îo""US’GŒÑXBÞåLŽD“p$š“»~bâìÿÃÞ¡ñÂN7MÒ”¶}3—NýGâÙ¾S£|^Ê™Ãõ¼ìŒ²Ï”—¾¡]·þVÑoÌ¿ú±õ;wb/kßÿ3Ùiç+¬»”šÈ¥ÔDâv¬!0ôFîxú}Zµñ¶Év¨,~¥ßÇ”“w9“¼Ë™\Œ?Æá‹ñëܛ۞˜‡o§žŠ±Z!!!dff’‘‘añ>ƒZe}–,YB\\AFc½L`+bë”ä‘zsý±¢Â|.§] .ú?ìZþ/ÎìÝȵ_rÄGÔXM¨är …±‰HlÅ´iÓ R£4;;Šòsùé_s˜òòbìíþGæiË~,þ6ŸE@¯!Öó#ù´íýØÉß÷-KN;3ú¦„‡‡Û\ý’ŽífÙ«PR\ˆw`wLø/‚ú Ãà釱¸˜ÔøãüéßÄF­&ñÈ–¿ñ0SÿùŽN.6×¶¿ÕŶ-~\{/SR\Dþ•K¤Ÿ;M|Ìfÿü©¿åû—ïåž¿¢]×>Š]³ED¬Ž’<ÒpÁåä‚gûN žü®m¼ØôÉß8¶u¹Ù$ÏÙCÛ‰Y³ˆ 'cÈ¿’‹Á¶]Cé;fÝ×þUŽ×? S˜—ÿꇧ|g=IÇv³s餜9BQA>AÁÜpÇÃôqWù/ýœl¢¿{‡“;ב% ïÀ`†Mû3Ãnâ³'Gp9ýO,<€³«õ¿ñÃX˜GIf±™““£@m .w:öNÜŽ5ì]õ ƒïz¼FûפO\ÿ ?ö÷ q?ååÅüðò½åúÀйñëþŸ+í#•-H8Íþ5 ¹p2†‚+Ù´öiO¯Q÷0pÒc8:»˜Müé+.œ<À•ÌŒ%FÜ<} è=„Aw=ŽO`pµÇÐsP¥ÇS“6ªm߯«’Ì2!­O/›‹ñ¢‚|Ö~ð %Å…„Œ¸‹±¿‰ƒ£Óo8A‡t@×AcY÷álRÎaߪOrÏSö=p>n?û"?ç܉=äegâÒº þÝøaÂ#öZé~–Ä|uñ[Óºä]ᣇú—Åê±-ËÙù)éI§ptvá‰/bJ뻟ýk¾àBì~r²Ò°wpÄàáK‡^ƒè7fíƒÃôü·„ï{GZµñ& 7=1`⣬yïiÎÚÆš÷þÄ̈µ88:7ÈuÏÒrÌ9¶åGöF~Jæù_q1¸Óeàh†ß;Çì(ºÚ|Nmb¯6}°)bÜ–¯Ù"¢$4‘œœ)É<‡]k?ì]m¢^ÝeÓ'#ãÜé ëö®ú„m_Ï-·,7;ø[‰?°•Aw?Îð{çÔÏ ›£ãÕ'¹œ=ÅÊ7¥¸¨ l}ÊéìûðY ¾q|é~q+Þ|„ó±û¯Ùî+çý;Ÿû˜‚ÜË¥¿gœ[)€›™ØØX®¤¦bÌ7`çÚ¦^Ë.ÌËá–ÿú'IÇ÷°óû÷érÃ-øv ±h߯ì–ÚÿŸ/Øòï×Áh,[–•|–ß¿Ë齘úò’r‰žã[—³î_ÿSn{€ì´óߺ‚“;×]}T¬_­Ž§¦mT›¾¯Ø®Ú±-˸œ~¯Ý3ëõò žëôvNÆp1þ8žþ]5æn^ʆ/`,)þ­ì¬4NïÝÈé}›¸ù¡¿ÓÜÌ:Ç|½ÅªÃo±z|Û ~ú×oë r 8¹s-ÿyïéru*.* +å,Y)g9±m%ãŸ~îCÆéš]\[{0éþÿž3ž¬ä³Ûü#}n^ï×½Ú–cïàÈÑÍKYÿÑ_~»¯ÌÊçȦïH9}˜é¯.-×Okó9µ‰½ÚôÁ–ãõ%**ŠÝ»wbñ>YYY¬ZµŠ|P (bå:^^¸®$OsÀÛo¿ €ó ÷âàÕÑ&êe,)J'1¼Vjü1¶;ìì8é1zßô;Úøp%#…بÕD÷{V, ëÀ[ñ¾¡îI§«7ÊWØøñ_ =•Aw>Ž›—éçN³aÁó$Ÿ<ÀŸ¾*û¡½šó±ûqtve̬7è:h ùW²ØõãGl^ô*EyWë¦I¥››ˆˆˆÒ açpœºŽ¨×²KŠ‹håîÅ­½Êª·f±îÿæpïkK+ü«ïõjÓ'úA¿±3,šÄù®¿|ÔlÂç´„8¶~õ&ööŽÜôà_ 6GgWRNbã'/’rú0»~ü?§Ïþí¦~Ë8:»2dòSôyŠ‹ ¹œÍ‹^!åôa¶/Žàž¿.ªÑñ×¶jÓ÷ÛU;³¯tDØ w8:»ÐmðزÇ#Oíú©ÎŸSÓØ«mTŒ‹ˆÔž’<Ò`Š‹ ÉJ>ËÞUŸ²õ«7èsë½å¶9wbOéò[¦™-£çÈ»8»¯Þ/ìöŠÃQ}‚JŸ=ÏÏÉ.[fúÑ×uÐ3eÌÔ‰–jz௸ûv`ÏÊ«½ÁoÊ>Q™¤c»ÌŽpñëÜ›§bÊß¿¶¸<Ó…yWjuÒ0Lóy™{ü¶¾®{µ-çÚGžLÚø“u±ÎŸSÓØ«mTŒ‹ˆ(É#Vù ÀCÚvíCß[ï5û/¢¦ùœ\ f‹0-7mWŸ,}VqAéë¦ÍM¬l:/R >ÃèÛýX÷o¶}3›ú»Ùíš²OTÆ4“³Åûd¥$ðýK÷r%£þÿ¶®mÔÞ‚×Úøp)5‘¬”‹'oì˜/̳ìÍO¹Wêó Q7ƒ‡¯Ùåm»„23b-ûÿóq;Öp!.† q1ì^þO?†M{†>£§)HˆéQ¢ëÏO}]÷êR޹ë›iάâ‚:NMc¯¶}P1^7ááá8:Öìgž§§' Pã‰Ø%y¤ÞX2k…\—Vä^¦0/ÇlÂÄtsPÙ rc°wt¢¸0Ÿ¢Â|œ\Z]wSrY'^,6|ÆsÄØÊuÿ¦ë 1t43—‰5ö 'W¹—ÉËÎÄàácÑ>Û¾žÇ•ŒdÜýŸþ,Bâê£öö޼?£Gí§\7Z‚vÝúr)5‘_÷ÿR§$OCžO'W9Ù<þÙ>\ÜÚ4hÌ7DݪšÐßÍÓ3žcÄŒçÈ8ÿ+gn%nÇ’ŽíbãÇ/P\X@ÿq÷+PÀé½ðÐ ×½º”STW!ž J“ˆ× ­íçÔ$öjÛãuããヷ·7–_¯œœð÷÷·Êú„‡‡Óh×âÎerÍÿŸ½ný¯Uì›uõÏõ\ŒFÚ]«°*®Àµdw¬äïb}4'4)ÓâôDó "ÓrÓvMÁÐÆ€ì‹ß8|úN¢ÔèßmO¾…=>ú ù9Ù†œ[cŸ0}VƹSï“x$€»ÿ÷szޏ“6~8»ºáàè\çGÍšÃu£%è6ø6ö¯YHÞå¬j·?wb_?7‘c[–7Úùôlßéjìžnð˜oÊXõòïLÿq3™òÒ7Œ~ìÕÒóòŸÏ¤ 'ó"1kJß6uý›êëºW—r2“ã+,3=~èæÕ¶Þ·ºØ«mTŒËµÂÃÃÛ£ýl0sˆ¶므OF^^Þ¾ºÌôgËuÎVñ§²oæ|;»*÷;{Íq]ûY×Çë×ü1-ûáj=Q1%õ+ ø57—ØØX%yÄúô ÀáMKÌ®?òË÷¥ÛõÔdÇèX:×P|Ìæ ëbÖ|ÙìÚÜÞ½-Î7ÜËìÙ³ R62ÿ70pÒ£d§gó¢Wpºnh}}ô‰’:ΑRY?=ºùÇ 
ëRãñáÌP–¼8¥ÜrÓc­½ÛUØgçïÿ¶]QA¿9\7Lœo¸—é?Exx¸ÍÅrðãñôïBNf*«ßyªÊI«SÎá?ïþ‘‹g“|ú`£ÏŽ}†°7ò3³ëÙ¢?!jÉÛuŽysñÛ±ÚcØ€Ÿ¬¶%~\ÉHażÇÈ»œI»nýè:ðÖ¹îÕ¥œØ¨È ËÎìû¿Î½êíx-½ÚöÁ¦Šq[¾fKÓ's¶«ŒÆ²$ÎÛ×$G¶»¯n—bÁ(ow#Áæÿ …‰Ã*ÿ3,”J÷½ö%®O ­º&a4òÕÕÖ¶«Ûi*õº;,LJ"""Âìz=®%MªïØÚð-Gùƒ§/¡·L¥µW;²Ó.plË2Žüü=öŽô3£Éޱó ·`+;—}ˆGûNôBaÞv-ûWƒÌ7ÒÐì]qðêHHH7`¹qêÓœÙÿ Ç6/«0’§.}ÂÁÉ…âÂ|Î߃_çÞõ6gTŸ[ïåàO_stóx¶ëHè­Ópr1|ê ?öÅ…ùø÷(ÿØ‚W‡.¤œ9Âöoçsã”?áäêÆÅ³ÇÙ³bÆ’b<Úv$+å,'w®£ûÐq88:[|üÍáºQvN¼:ÔÅÛ{tÌÞÁ‘ñz—^¾Ä#;øjÎxLz”Îa7áîÓ¢‚\2/ÄsbÛJÿüEù¹´íÊðûþ§ÑÎgß±÷³îKNî\ÃÚf3tÊŸhã×¼ìLNíYÏöoÞ¢ ÷2×%¨jóæâ·¡ê¶öƒÙÄÜJè-Sé3z:­½ÛcïàÀåô ìYQúZ麵wºƒ“‹³‡¶³{ùG½½ÎÇ[ÓØ«mlª·•köªU«Ø½{w’U©©©,X°€çŸ^7jõÐI¡ôQªd£‘ük“6U$pZ¹@ ¯ƒ+µ-ÝÎÇ|®>éhp ròìêx´5ÛÿDÂo½æï'Ζ&ƒr ìHL-¿Ïµ£†®×è|õ¿m)}…­<÷‡¿£3ãž|‹Åý%Å…õÖ'|»“ræKÿù{ vód™ãÛ1„á3þ‡m_Ï%jIQKÊÿ«w@w†L~²Ü²&<ºŸåàO_qð§¯Ê–·önÏÔ~GÔ·o‘•r–µ<”«¥Çß®-%¶Ûv eÊËß²æ½?‘y!ž-‹^eË¢WÍnÛmðXÆ=Qan³†<Ÿmüûø\Ö}ø,'¶¯äÄö•fë0lúì:Ǽ¹øm¨ºu8šØèÕì]ù1{W~\ñãäˆßÿEq] Õ½)Ô¯soÆ?ý¾Ù7¯Õ×u¯¦åü÷WÇJoê]6}6ë>œÍÏŸ¿\îØ:ôD÷!·×ùxk{µíƒ-5Æ¥ù&uâx£±â(œëþßÛ݈Oéh‡Áý®OÞØYm]C‚Ìÿ}RxùcNHœüÒDPNž‘„H¼hGn~ùò®Oþ´5édgG' à¡ðR’ÇV1gÎþ¼äT‹©sØøñ êÁ¾ÕŸ“|òù9—piíA‡¸áއ è5¤IÏÉÕÀ=/~Ŷ¯çröà6JŠ‹®Þ1꿲þ£ÿårF2Þõû/ì'=†OPûÿóɧR˜—ƒ»?Ýo¼Áw?QaÔMÏw‘w9‹k¿äRjCodØ´ghãÛ¡Sž&ãÜiÒâp÷õ¯ññ[ûu£%ÅvÛ.¡Ü?-Ç·­àôž ¤ž9BÎ¥tÀHk¯ötè5ˆþ·Í¤]·¾Mr>{ »ïÀîì‹ü”Ä#;Èɼˆ½ƒ#^]é1l"ýoŸYöö¡ºÄ|eñÛu ŸHk¯v\ÿ5çc÷‘›±¸ƒ—½‡2pâc6™älŠk¶ƒ“ O_ü»‡<ìº ¾ »J¾óëëºWÓrJŠ ¯Þ¯¸ÑsÄì]ù1çÏàâÖ†îCÆ~ï³&ò®ÍñÖ&öjÓ[jŒKóp8a.©sݵ!8Àˆ¯§AmK“9A~`pµýß ¦„Ui"¨|}O$@Ú%HH6›X~äOŠ)”>¶ÆÕ$OG£‘;;z(ìjÄÎh4mµr³f•ŽºhÛÏva­-ÚçЗ¥ßLœ8‘I“&5Ù±·¤$Os–•’ÀÂ?Ý‚‹[ÿl_³:öw§ëq- É’<Òr®•˜˜hµke¬\É¥Õ«›ìócF#±Pþñ«k éhG ò#]¤j'JGýœ8k$.É|Ûº\å£G»¶P:ÇÀ‚ *¬×H‘jäçd“yþ ¾z–=÷n·c P:t_DDDDDlC2p8hšWçºäNÿn¥I@?ó£VÄ2!W“b¦Ç¾R 6±4é›Xú˜W¾±”&Û\ŒFzCììh§æ3KI‘j,{õRN¢Ç° ùÝãÑ®#yÙ™Äí\Söö Ó›DDDDDšÒ¤I“hÛ¶-ïãççÇ„ Öy?;þ|âââ2™ÙS$Ä[¹f¾˜«ŸÙʺC ëÖ2½j AmKÿÜ: ´}cN–þ9qÖHz¶ùvv¢ôUíÀ( ¯š­%yDª1jæó,ó¿ˆ^MltÅa¢BÒwìŒfScQÆË©ÄƈÁ`ÐIi %™ $üšƒ·c|||Ô bS±­ïÑ5[lI,¥¯úκnyÿnFÂûئûM"¬;WÛÞ®,ásróKÏÕ*J_R²ç7JòˆT# ×¦¿òû"?åì¡íä^JÇÁÑOÿλƒÆ?Œƒ£Só¹ÉN¡`ÿb"öÁìÙ³ ÑIi ùû¾eÉ>¸ÒÄó¼‰4Dlë{DtÍ[`J\û¦'o÷ÒÄέ4bÇš˜>9ys Vm/Ýc:‡;FÆ^}CWK¦$ˆ|;†pÛ“o©!DDDDĪ¥¥¥‘žž^é›àÌ),,äüùóx{{·¨¶ÚMéèow#“†ÛªÄŽ53¸Bx(„‡–ŽîY‰©¥oèú Œµáú{\]q2?÷’E¾¡8ú÷Qƒˆb[Dq-ÒìEEEÕøíZ™™™¬\¹’ÐÐÐÓN«ŒF]3ßέ~›üWšÓ螨#ðÝÏ¥qí¦tâì)Øæ›¸úƒñŸ3Çìz%y¬LNN±±± óÔ " ÈÔ×ÝÔ¢ØQ\‹H ñ{5ÁèOÞ >mÔ.ÍYxh鄨 ×9pÊŽ³ÀWF#÷ÛÙµ¸W®Û+DDDDDDš—„„bccÉÉÉ)·<--­VååååUØ·²ÏhζP:É2À°Pxñ%xl…Áž¼ÛŽaW¤¥ØÙ±Þhlqí ‘<""""""ÍÌ_|ARRwÜqþþþeË£££éÔ©fSÏ:;;“––Æ÷ßπʖGFF’œœÌßþö7›x“^°íêßýà¡ÛG¶Èt^£À!;;úA‹šŒY#yDDDDDDš™9sæàïïÏöíÛINN&==ôôt&Ož\.Qc &NœHçÎËÊÙ¿?ÉÉÉ<øàƒU2Áks³åš¿?y·bÈ–M»¹t®%€­-¬îJòH½Yûá³¼wowVÌ}Ì&ê³öKÞ»·»N¬¨¿©¯‰ˆˆXƒÁÀsÏ=‡Á` ##£^Ë.,,äðáÃ<øàƒ5šÀÙÚ™^“Þ¿›QhÙzÿp…a½åÎ{K¡Çµ¤^ä^JçäÎ58º´">f3—.ž£o‡f]§”3‡ubEýM}MDDÄzÈ ¼üòËdgg³sçNŠŠŠê¥ÜÎ;3wî\«xDkΜ9d¬\É¥Õ«ë\VÖÕÿúxè-Z-Ag;Ø_ú÷ ¨…Ô[#y¤^ùåŠ 6íŒÆo\Üìë”rÚ6xÚ9¹bçHpp°M<[­þÖüû›­ö5;@;uÅÇÇG+6Ûú]³­‡»»;C‡Åѱîÿ†@ß¾}mº'¤(Þ[‚¤‹¿ý=Û†êuX˜”DDD„ÙõÉ#uf49¼a1î¾ÿ û"?ãè/?pã”?aïPyˆåçdýÝ;œÜ¹Žü+Yx3lÚŸévŸ=9‚Ëéxbáœ]Ý*ì{>n?û"?ç܉=äegâÒº þÝøaÂ#öZnÛ¼þõP?<ý»ðà;ëI:¶›K? 
åÌŠ òð æ†;¦çˆ»J;Íúoøù³¿—íozŒdÊË‹ è9ˆó±ûÙ¿æ .Äî''+ {G ¾tè5ˆ~cfÐ>8̪ϗ}붸œÁœéݼêoÕö·¦ìk@³ïo®gpïÍþ·Óa±-ú[ëæ|ÍvwwÇÍÍü‘‘#GâääT£ýãããÉÉÉáöÛm6â éF@£ylݵÉ<7ªW&ðkn.ÄÆšÿ½§S/u`+Y)gé>{{z޼‹+)œÚ½¾Ò}JŠ‹Xñæ#Xû%W2’)*È#åô!VÎû¿Æl¦ ÷2Nέ*ì{tóR¾é^Nî\CNf*%Å…äf¥qzïF–¾r?Öý»|_ý¢‚\ÎŽâÇ×$ápùW²(.Ì'åôaÖ}ø,q;ÖT[ד;×òýË÷ý²ÓÎS\T@a~Y)g9¶yß¿|/'w­SPˆMô·¦ìkêo"""5׿ [·n¥°°Ðòû‹øxöíÛÇСC[D;eçØq"AñbËÒ.ÁÑ_[fÝ5’ÇÊ„„„°`Áþ¼äT³9æC뿠רÉô¾éwì]ù1‡6|KðãÍó±ûqtve̬7è:h ùWbn*< IDAT²ØõãGl^ô*EyØÙ—ÏCf&dzéÓt÷ã„Þ<•ÖÞí¹’™BìöHvüð>[¾|Ž}‡ãÕ¡kéO‡ÒÅ(È½ÂÆÿJèè© ºóqܼüH?wš ž'ùäüôÁ7ާߨô;£lTÁÓ‹O–}þ—³Ça,)fгè;ö>ܼÚb4ɾxŽÃshÃbŽþ²”îCÆ)˜› ðþÆDN_ÌW»®¿5u_ˆZòŽú[ ŠmŵHÝ™&cž7o[·nµhD)Áck“,Wç£FžfGP[Å­ÉɃ-o¹£µ4’Gê$ûâyÎìÿ…vÝúá €w@7Ú‡‘p$šŒó¿šÝÏ4ê`Ð]³> '—V´önÏèÿúží;SRl~Ò¸ƒë¾*‹dêŸ~ï<ÛwÂÑÙ¶A žüC÷ß”qèš9JììJ;wAN6½sË#ÿÀÝ×{G|ƒzpËÃ/p1þXµõ½”’Pö£·_ ŽÎ8:¹àåß…‘÷?Ï“ pçs+0¤Ù÷·¦îkêo"""µsí[·:Tõ᜜™àÈÍ·cáZ£æç±19yñ‘ÄÔ–û8ž’ Ã!$H±Óܤ]*=—1'ÁôˆÖ´[ ЯeÎË£‘ ‰çÒ¥KÍ"Á¥Ižõ±±°ð‰, ÏN+MÄ&ÂÛßÁÂ5FMÊÜL¤]*=_/|bJð€·»‘'î‚[´ÜvÑ?ƒJ­\ÿ 'w®)›8ÕœC¾¥çÈò|Ø;:Q\˜OQa>N.­®ûÑx¹Ò²œ\ ädóøgûpqkÓ$õvóôcÄŒç1ã92ÎÿÊÙƒ[‰Û±†¤c»Øøñ ÐÜýV{ÞŠ3ÎR°1³6ÁìÙ³ Q0«¿Ye_³…þ–»ió7Áĉ™4i’YlFî¦yú›Œk[½f·nÝš‚‚6mÚDNN>ø N8¥#z^|6îƒUQ›ÑGíˆ> =!¼Oé£]Wµ•5‰9 ÷–&æ®\yâ0¸u€]‹?_É#µ’žtŠÄ«svTç܉½\Lˆ-·ÌÐÆ€ì‹ç*lŸ|ºò â<Ûw ãÜi«h/ÿÎô7“)/}ÃèÇ^`ÿ>W€H³ïoÖÖ×ÔßDDDjÏ4G§§g‹œd¹:·€×-M´ºúÔ{l",\[:ÇËÂ5Ʋ‘"Ò4RJÉúó‡F>ZaJð” ¯?“•ä±:iiiDGGSôkíû`çÚÆ*óÐ†Ò ^ûŽ¹Ñ¾Rév›½JÌš…Zÿ ·<òrÙrïÀîd§'>f3ÞÝÊí³æËJËëØg8)§³7ò3&<óA…õ¿ÆlaóÂ<ì§Ϯ—º–”coïPív=†MбÍLdd$©g.QìäƒWG«=ΦèoÖÜ×Ôßl'¶E×"Ç`0ðÒK/©!*k×Ò$Á­JGöD6’žmGn~éÈžè£ÐÊÅHX7 ¶£G  -æ$ÄĉM´#í’iiéÈV.¥ç*¼ø´iYíÒè€÷ôéf×+Éce.^¼ÈªU«JÃ×#+LòæçòÿÙ»ó𨪃àß;K23„l“fB– l""‹hµ­ˆT¡ÕW~.Ô*Ôö¥Õ¾oíÛ·‹±µµ¥hUpCx]*‹b+¨,‰‚`€faË d²3Ì’Yîï;3™!L€$³|?Ï“'“™I2sî9÷ÞósÎ=²ý]‚ Kî¿ès'ÜöCø×ëøzçû˜z÷Pª4€‘fàäþøâÝç‘1ív¿û7œk½p§­à¦¨øèUÔ~ñ!¶üåq\ûÝ!>ulgÛpôËc×›@§ÕŒÎ ¯Þs1re,\;Ný%RGŽA¬f(¶üåqœ<°cg|ù3ç#.92¹æ–z|ù¾t)ç”,[Þ¶¦Y²†ÁjoƒÝÖ°½ExÝ&b½&¢P{æ ¨2凤°ÁjG@àHSºò²€\½t›.Oµ¨6Uuþ#u/…>~´ˆâ|…ÙÑ[N RÕjd^`º4Cê{ã+Ûû¹ä|s¶oJDžħ Cnq ¾ÞñOT•mBþÌ;c§û·¬A[ýIlüÿÎ^ îøÅëø¿_ÞÙóßKÕá¦ÿ÷;|ôü2TíÚ€ª]º='íª±(º# ´ºl4?„w~µðè[µ¸jÒLT—oÆÞ /`ï†zì¬N]øSV ûö6Øm Û ª<½ôe±I¡CEmWàã %ü¦ éREäé_èÑ>ÖÜ!MÁª6U†®Ëž÷düh…9 G•ÀÂëCê3ï°“æ- êù“æ=€¯w¾ƒ¿éët*UÜñäëØùÆïPw`'Ü.§§Ã¸ Ùž‹# =7àÜ¢9HÖecߦÀxèsXÚš “+4|r‹J0þÖ{º]BúRL»÷çø÷ÊŸÁÜÚà›â’W\‚¸¤tø÷8S½Ö³­].h’R¡s-&•ü´úVŠˆö6˜m툈ˆB…FfÃ7z¤¢¨ª“¦M]Ï3š¤Ÿ·î“~VNJЧ ÈË’¦éR¥Åž£Mµhnï v ð›~œ?ZG— äêDäeyGì0Øé †<Ôg ~ó^ŸžŸ¢ÏÅ£kkºÝ?T›‰Ù?úS·ûÛ¥k^ìRê)ú\Üüàïƒ~ Þ‘}y|øÕS°øÏÛz¸2†_=™¢¢½ f[c{#""¢P$>Rð`±“wš‘ˆšS]„Õ.tí#…"4±ôiR€¤K•¾‡ó”¯æé«Úà)“FÀ`aµ÷Ðä —\= OõŽ€b°s©òР°[΢íÌq¤Œø䊘€Çj>ÿ<<›EÄöFDDD²4ª®i]s‹¥`¢ÊMRÐÑÔüðMM:?üñò{¼A Ò&t=¦ïŸE‡ €µ³ëgï(@z?ÍÒë÷;Ö=¨Ñ¥Úx)ÔÑ¥JåÆ@çÊbÈCƒâÝ_ß‹Æc‘[t¦|ç$¤gÁv¶ 5_|ˆ/Þþ³´s+ºEÄöFDDDQlþüùhüøc¸>ÿ{`}üñǤ<ðøÉÖÊpKŽÔu4¨ªªb…! 
Q yˆˆˆˆˆˆBœw‘å‚ta@¯é£dÝ^  ¼nHJBIIIsºVˆ©ªªÂ³Ï> ˆ™päIY,¢~²dÉiG8²ÊQSY ĺMÄzM²¬V+@—08ŸÓ«•’Ô@«UZšˆÇ9Z-2çÎíñqŽä!""""" Ƀxód5ËŸ(Ô1ä!"""""""Š yˆˆˆˆˆˆˆˆ"C"""""""¢À‡ˆˆˆˆˆˆB’^¯ÇUÉÉHgQ…W×""""""¢4þ|´ªÕèØ¼™…A†›ˆˆ!O¤v8mpÛ W}% #¸L5p™jà¨Ù …~W]A¡bÁ„Êö©¯DgõVv~#aßi6Ái6ÁiØ Y¢Š«®ƒ<)+:ËÂiƒË¸Žº=¬ÛR·5[òŒ|(GM½ì ZNNNµÚáP'rƒ]Cž“——‡U«VáÇëŽ^òßpž©”NäüNÊ“Ô@AºµR@NŠÀ‚V‡c»V‡ˆƒ "Z­žíkØ ç™J(sfA‘™Ï‚º «V­ÂŸ·q¬éÒ:°¢­öïLÉR)€‚ ’Õ€V# ™†…S€¥SDm³G[¤ûÜmt~õäùˆ3'ªê¶ÛÜû¾µÝŽ#ÙZ©nëdP+EVœ0PÓ$ÂêQۜ«¾®zÏqD?)lÞËòåË/«^… ‹Õ‰«ç¬Ç ßEÂИn y/@¬z€E=bÈa5[á4ìõýœŸ.àŽ|’Õ,›pT!Ü‘´X7¾rHP§Ž#ö(F³ Áùà$50;WŽ)z^´0ek½·ä°:D|zÌÏŽ»asJb›¹±DÅ:ç™Jiÿâ1:˜'G¶öüºÍ «nKÇ‘«\ØctûÎÜgÂ.Ä$"Št¯þ³)I*¼ôö×xüãX Ô'ìDÎÃø•x¤HŽû'3à‰Éj`i±É¡òijŽc;á8¶“…3ÀÜæFØw¯ö<·äÈðËYJ<B­0;OŽ_ÎR`t²tŸh6Á¾o-D§-¢ß»À£Rw—ci±²‡€‡Âõ8²°PŽŸLS`X¼Ò¹ê+ÑyøQˆE}óVÿïtüý­#p»9r–ú†gmtbî]gX¼€'nàIy¤ÊÖÊðÄ Jß ºóDœg¸öÒ€xmÒ¿Nðì<9 &©•–+1Y'íKE³ Ž#FìûuµÖ,÷aëêÛ`±9±ôé]€¥ öN7–>½ ÷|+gv~m_þfþûù½xã™™hþâ^|ïÖQøÁŠOñ«¿îÛ¥3a*¿s¦eáÑÿ)óý¯{ú ¾=k$Nïø>Nm_ˆoω%Oí`E !ìDJ§Óci±j%OÌ£4}Ë“ò8íAw<]­u°nû=–,Y‚ªª*d8ÏTÂÝf MÑbÀ=î+ëê ÷=m˺í÷xæ©Ç°qãÆÐ®Ûu_„— x¢ƒZ)tûÀ X<ŽP$ —}6E®#G[ÑÐdÅÌo\M&ÎYœ½Ž”)_÷m,½'Ú$”J²G$à—OÄæO¥ð^´¶ÛQ2# wÜ|T± hÔ X¬N<õðDäç&cˆF‰GïÍÇÉSf<ýè5›ÓußW‡›ý^c¾?/µCãbð袔¯û67^a%̹Zë|Îi#ž˜G™áñB׃úJ¸Í,”þì—> IRÓGq÷má;Æz¶¹Ó.…"B´uÀyBú„nt2^F™d5pÃU]S9ý—ˆhð<÷j%½7ðê¹-.Àskú¾oÖgÆ¡¹-ðj„·^¯ïö¼IcS»Ž ‰±€ cRîk?Ûð7–ýîs¼üö×0œ1s£… ^]+Ä ¬_¿öF+”¹³ ‹K»èó]~'c\$:ÍΓû®”â<}1¹7²P‚TZZ c«Δ±½^ŽÞmn„hk—ÊJ†ÝF7Z­€ËTÝk ‡zMDnZÚlxíý¬zë~ðŸŸ<¦Š•ãw?¹)I=_åÓÞéÂoþþÞßz†z3ΞsÀétw{^jkzhÔ]‘€ ¼Ïkõo§ã™—÷ã·/îÇý¿ØŽ[¦êñüS×atV<7â™àÆìldþä'=>Î'ÄX,TWWDGïÓ¼£xòÓv:£T²Zúôýh àn: 0ä š·­)† ïõ¹ÎÓ|· 2ØÖ¢Õµz9޶¸ ÚÚá67öćCÝvµH :&©Áû£”Z)  ]Àö"ÜMµ±Ï&" 7/¬ÿË~0¿zôšnýê¯{±ê­Ãøùƒ{üݻۊ1ÙIø¿çnDj² Ï<ÜØ‚—®øëÔ¨xêáIxêáI8fè@[G'.߆Ï×sÊV¨àÙ\óY0.ƒ›2šË”>}mí‰çAkog¥©pÃâ¨F³ì”®}­«±:2ê¶§S_ÎzÍrRºFqqf"¢åtºñ×7aáÜì¿»$+×Ãáîññ-; X±¤9#‹˜9¶ì0ôûë¥Ç˜ìDTÖ´p#†&a̤O²†å͆ûŽô†t…ÛÛ9“ÔÉOfYD³H¾r¡vO ¢ú8’ÐòñjDDëíŽ!=E¼Q‰=>ž="ºŒ!xû£c=>žwU"^ýg ¬6' gÌXùæa¬\{#‡ÅѺ+»OŸ¶pž½gͰÙXõÖL)HãF !<£ g®®…´8²€¨Ÿy®<ĶF£=A_$,tî?bc8§ÒG5ÿS´¶±@ˆˆПÖT^p×¹ÙxîÕž`~åoÀkjòÍW1ñöwñùþ¼ñÌ ,¹ëjŒ›÷ö}­üÏ"¬Ý|S_GÆu¯ããòSxåoàF !\“'Œ¹;üNÎÙñŒfÜþD/˜uÓˆˆˆˆzÌz6KïÉÇÒ{¤EçŪ›0&eo}«ÛïüìBüìÂçRï›”ŸŠ]k¿ÅÂòEŽ.!"""¢H´|ùr´nØ€ŽÍ›YDA`ÈCe¥ B‚Ùijh4\̉¨_Û[‚Ãc ÕjYqu›Çâ>›ˆhàpºµq›6¡¤¤¤Ûã yˆ¢Œ,. ªIwcùüÑ, ¢~¦št7îšž‰œtv„)òê6#Ä}6ÑÀ; `gs3°qc!^&"""""""ŠÉbôz=üq<ÿÉiȆòRtDýéñÇÇ»ûL¨ï Öm"Ök""¢°Ç'Äh4äååA^ÁMCÔßòòò0Ä8B“…A¬ÛD¬×DDDaÓµˆˆˆˆˆˆˆˆ"‡‹QH*++ÃéšÄÇâ êC"""""" Ieee¨©©^1NX D½àt-¢(ã67¾o-JKKa0X DýȾo-Ö½ü<ÊÊÊXqu›Çâ>›ˆ(ôp$Q”6¸Û ¨n, „¨¹Û 0´ÍùW³0(âê6#Ä}6QèaÈb, ŒF#Üm§!Ä¥BP¨X(Dý¤ººçL&ˆv U< „X·‰X¯‰ˆˆBÚxÙÇ#yþügÈb ž}öY@Ì„» OÊb¡õ“ÒÒRiG8²ÊQSY ĺMÄzMDDÒ¤ªÕÈÌËëñq®ÉCDDDDDDDòE†£2‚þ;šy…eÃÃÝn‡ªpm<Ž\Y¢­¢­¢ÃñœÖ¸-mmíÝžën3ôÏù_$(U M÷lìXȆ¦yz±Å¥Eìvà>;ò¥h`h,WU×µÓ£è}3ä 1999¨m´BPFæ§.on«BJ¼kþ}?úvá ½Ž¹OmÀÆ_Í ¸ï¹~…?½[nˇ(²>FºœœœjµÃ¡Nd[c{cÝDϾ³¯mýÿ³¸¯åß—[Díé6|vðnüÙ»ø¿_ÌÁ¤œÐ8=ë©._*¶èÙg»Í­íÏ™›þ l.…h6Á¿ºL½¬â |dCÓ (Õ%ê!¨ ¨âYY)de‰"û 04ú4–I¤²Ø€òÃÒm½(Bô„z yBŒ^¯ÇòåËñãuG#òý‰¢ˆUÄ+ËnÆÝ¿ýÌ™lpÜÑ3Ý?Û~ð4>ùÃw02=O¿±›2Â-_¾Þjı&;ÛÛëö Ù[Ó€UDÙçCßõáÆ5¹é¸&7±J96}qV;`h”n7wMm"Œë³• IDATMBÀº€4*Èe6u $KÔCž<²D=d‰\ω^€›MJ×3è‰D«?Q~XÚŸeïŸ! ¨¿n<€Çî˜X:o<ž~sw·Žç»;kñô›»±æ'7#gX"¶|y÷þá#Ø._ÀóÁîãøåkŸcÕ£3Qtu&öÕ6â¾g?†.5ÓÇ飔£ÃÒ‰ÿøÓV¬X0kǃÍáÂ/_-Ç“k>‡eÃÃa±VQ¶µ`ÚÛå´µÇWíÀÿýbÛ]Ô®C§±â®ÉA=÷í5øÛ¦X³ü&\•Œ£gÚqÿ³#V)Cɵ£zýýÞê3l?x ?}y^|t®ÉMC»¥kþ}ýå“ ÖåÞ^WŒB»Ã…e/ìÀÝÓó°fùÍp‡ðü,ÑídżH¨ãn«ƒ«µ®×:ºT@/BŸ&@ŸhãýœðùTY£ ‰ 2Õ®÷Qe‚Ÿævi½“æŽî£Îù#Ä¥Bž”Yb–oÔQà$€ƒž çׯ~‹S·"Asð·Š0š<º(b®}k/1ä¡óµ¡m߉ôuc‡áœÍƒ'šP02% sú»û®ÃÄliOû½i9hî°âñvøžó—÷÷ã÷÷OŲ×çÇÓ‹Šð⇕¾¿o±;ñó“1{òHÀPO.¼S–¾ÅAlkA¶7¶5êoõ­èRâ‚zî‹Vâ·÷]盺U02¿^\Œß­ÿ2¨'˜úüÜ{_áw?¼Ec2)ñj,ûÎD,ûÎÄK~]‚  ÕlÇìÉ#ñ­âÐ^¬XE¸N퇨›È7¤éWî¦Z8«.êä ¡Oò²„° s.Wàh!! 
HBase
===================

This storage implementation uses the Thrift HBase interface. The default
Thrift connection settings should be changed to support using ConnectionPool
in HBase. To ensure proper configuration, please add the following property
to the `hbase-site.xml` configuration file::

    <property>
      <name>hbase.thrift.minWorkerThreads</name>
      <value>200</value>
    </property>

For pure development purposes, you can use HBase from Apache_ or some other
vendor like Cloudera or Hortonworks. To verify your installation, you can use
the `list` command in the `HBase shell` to list the tables in your HBase
server, as follows::

    $ ${HBASE_HOME}/bin/hbase shell
    hbase> list

.. note::
  This driver has been tested against HBase 0.94.2/CDH 4.2.0,
  HBase 0.94.4/HDP 1.2, HBase 0.94.18/Apache, HBase 0.94.5/Apache,
  HBase 0.96.2/Apache and HBase 0.98.0/Apache.
  Versions earlier than 0.92.1 are not supported due to feature
  incompatibility.

To find out more about supported storage backends please take a look at the
:doc:`install/manual/` guide.

.. note::
  If you are changing the configuration on the fly to use HBase as a storage
  backend, you will need to restart the Ceilometer services that use the
  database to allow the changes to take effect, i.e. the collector and API
  services.

.. _Apache: https://hbase.apache.org/book/quickstart.html
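Once the Thrift server is running, Ceilometer is pointed at it through the
``connection`` option in the ``[database]`` section of ceilometer.conf. A
minimal sketch follows; the hostname is illustrative and must match your own
Thrift server (9090 is the default Thrift port)::

    [database]
    connection = hbase://hbase-thrift-host:9090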
Sample Configuration file
=========================

The sample configuration file for Ceilometer, named
etc/ceilometer/ceilometer.conf.sample, was removed from version control after
the Icehouse release. For more details, please read the file
etc/ceilometer/README-ceilometer.conf.txt. You can generate this sample
configuration file by running ``tox -e genconfig``.

.. note::
   tox versions 1.7.0 and 1.7.1 have a `backward compatibility issue`_ with
   OpenStack projects. If you encounter the "tox.ConfigError: ConfigError:
   substitution key 'posargs' not found" problem, run
   ``sudo pip install -U "tox>=1.6.1,!=1.7.0,!=1.7.1"`` to get a proper
   version, then try ``tox -e genconfig`` again.

.. _`backward compatibility issue`: https://bitbucket.org/hpk42/tox/issue/150/posargs-configerror

.. _Pipeline-Configuration:

Pipelines
=========

Pipelines describe a coupling between sources of samples and the
corresponding sinks for transformation and publication of the samples.

A source is a producer of samples, in effect a set of pollsters and/or
notification handlers emitting samples for a set of matching meters. See
:doc:`plugins` for details on how to write and plug in your plugins.

Each source configuration encapsulates meter name matching, polling interval
determination, optional resource enumeration or discovery, and mapping to one
or more sinks for publication.

A sink, on the other hand, is a consumer of samples, providing logic for the
transformation and publication of samples emitted from related sources. Each
sink configuration is concerned `only` with the transformation rules and
publication conduits for samples.

In effect, a sink describes a chain of handlers. The chain starts with zero
or more transformers and ends with one or more publishers. The first
transformer in the chain is passed samples from the corresponding source,
takes some action such as deriving rate of change, performing unit
conversion, or aggregating, before passing the modified sample to the next
step. The chain ends with one or more publishers. This component makes it
possible to persist the data into storage through the message bus or to send
it to one or more external consumers. One chain can contain multiple
publishers, see the :ref:`multi-publisher` section.

Pipeline configuration
----------------------

Pipeline configuration, by default, is stored in a separate configuration
file, called pipeline.yaml, next to the ceilometer.conf file. The pipeline
configuration file can be set in the *pipeline_cfg_file* parameter in
ceilometer.conf. Multiple chains can be defined in one configuration file.
The chain definition looks like the following::

    ---
    sources:
      - name: 'source name'
        interval: 'how often should the samples be injected into the pipeline'
        meters:
          - 'meter filter'
        resources:
          - 'list of resource URLs'
        discovery:
          - 'list of discoverers'
        sinks:
          - 'sink name'
    sinks:
      - name: 'sink name'
        transformers: 'definition of transformers'
        publishers:
          - 'list of publishers'

The *name* parameter of a source is unrelated to anything else; nothing
references a source by name, and a source's name does not have to match
anything.

The *interval* parameter in the sources section should be defined in seconds.
It determines the cadence of sample injection into the pipeline, where
samples are produced under the direct control of an agent, i.e. via a polling
cycle, as opposed to incoming notifications.

There are several ways to define the list of meters for a pipeline source.
The list of valid meters can be found in the :ref:`measurements` section. A
source may operate on all meters, or only on included or excluded meters:

* To include all meters, use the '*' wildcard symbol.
* To define the list of meters, use either of the following:

  * To define the list of included meters, use the 'meter_name' syntax.
  * To define the list of excluded meters, use the '!meter_name' syntax.
  * For meters which identify a complex Sample field, use the wildcard
    symbol to select all, e.g. for "disk.read.bytes", use "disk.\*".

The above definition methods can be used in the following combinations:

* Only the wildcard symbol.
* The list of included meters.
* The list of excluded meters.
* Wildcard symbol with the list of excluded meters.

.. note::
    At least one of the above variations should be included in the meters
    section. Included and excluded meters cannot co-exist in the same
    pipeline. Wildcard and included meters cannot co-exist in the same
    pipeline definition section.

A given polling plugin is invoked according to each source section whose
*meters* parameter matches the plugin's meter name. That is, the matching
source sections are combined by union, not intersection, of the prescribed
time series.
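Putting the above rules together, a minimal sketch of a complete chain
follows. The source and sink names are illustrative, and the sink relies on
the conventional default ``notifier://`` publisher; the wildcard plus
exclusion combination matches every meter except *disk.write.requests*::

    ---
    sources:
      - name: everything_but_disk_writes
        interval: 600
        meters:
          - "*"
          - "!disk.write.requests"
        sinks:
          - default_sink
    sinks:
      - name: default_sink
        transformers:
        publishers:
          - notifier://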
The optional *resources* section of a pipeline source allows a list of static
resource URLs to be configured. An amalgamated list of all statically
configured resources for a set of pipeline sources with a common interval is
passed to the individual pollsters matching those pipelines.

The optional *discovery* section of a pipeline source contains the list of
discoverers. These discoverers can be used to dynamically discover the
resources to be polled by the pollsters defined in this pipeline. The names
of the discoverers should be the same as the related names of the plugins in
setup.cfg.

If the *resources* or *discovery* section is not set, the default value is an
empty list. If both *resources* and *discovery* are set, the final resources
passed to the pollsters will be the combination of the dynamic resources
returned by the discoverers and the static resources defined in the
*resources* section. Any duplicates between the resources returned by the
discoverers and those defined in the *resources* section are removed before
the resources are passed to the pollsters.

There are three ways a pollster can get a list of resources to poll, in
descending order of precedence:

1. From the per-pipeline configured discovery and/or static resources.
2. From the per-pollster default discovery.
3. From the per-agent default discovery.

The *transformers* section of a pipeline sink provides the possibility to add
a list of transformer definitions. The names of the transformers should be
the same as the names of the related extensions in setup.cfg. For a more
detailed description, please see the `transformers`_ section of the
Administrator Guide of Ceilometer.

.. _transformers: http://docs.openstack.org/admin-guide-cloud/telemetry-data-collection.html#transformers

The *publishers* section contains the list of publishers, where the samples
data should be sent after the possible transformations. The names of the
publishers should be the same as the related names of the plugins in
setup.cfg.

The default configuration can be found in `pipeline.yaml`_.

.. _pipeline.yaml: https://git.openstack.org/cgit/openstack/ceilometer/tree/etc/ceilometer/pipeline.yaml

Publishers
++++++++++

For more information about publishers see the `publishers`_ section of the
Administrator Guide of Ceilometer.

.. _publishers: http://docs.openstack.org/admin-guide-cloud/telemetry-data-retrieval.html#publishers
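To illustrate the multi-publisher capability referenced earlier, a sink may
list several publishers at once, in which case every sample leaving the sink
is handed to each of them. The URLs below are a sketch (the file path is
illustrative)::

    publishers:
        - notifier://
        - file:///var/log/ceilometer/samples.log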
ceilometer-6.1.5/doc/source/webapi/0000775000567000056710000000000013072745164020341 5ustar jenkinsjenkins00000000000000
ceilometer-6.1.5/doc/source/webapi/index.rst0000664000567000056710000000241713072744706022207 0ustar jenkinsjenkins00000000000000
=========
 Web API
=========

.. toctree::
    :maxdepth: 2

    v2

You can get the API version list via a request to the endpoint root path. For
example::

    curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8777

Sample response::

    {
        "versions": {
            "values": [
                {
                    "id": "v2",
                    "links": [
                        {
                            "href": "http://127.0.0.1:8777/v2",
                            "rel": "self"
                        },
                        {
                            "href": "http://docs.openstack.org/",
                            "rel": "describedby",
                            "type": "text/html"
                        }
                    ],
                    "media-types": [
                        {
                            "base": "application/json",
                            "type": "application/vnd.openstack.telemetry-v2+json"
                        },
                        {
                            "base": "application/xml",
                            "type": "application/vnd.openstack.telemetry-v2+xml"
                        }
                    ],
                    "status": "stable",
                    "updated": "2013-02-13T00:00:00Z"
                }
            ]
        }
    }
ceilometer-6.1.5/doc/source/webapi/v2.rst0000664000567000056710000006336513072744706021430 0ustar jenkinsjenkins00000000000000
.. docbookrestapi

============
 V2 Web API
============

Resources
=========

.. rest-controller:: ceilometer.api.controllers.v2.resources:ResourcesController
   :webprefix: /v2/resources

.. autotype:: ceilometer.api.controllers.v2.resources.Resource
   :members:

Meters
======

.. rest-controller:: ceilometer.api.controllers.v2.meters:MetersController
   :webprefix: /v2/meters

.. rest-controller:: ceilometer.api.controllers.v2.meters:MeterController
   :webprefix: /v2/meters

.. autotype:: ceilometer.api.controllers.v2.meters.Meter
   :members:

.. autotype:: ceilometer.api.controllers.v2.meters.OldSample
   :members:

Samples and Statistics
======================

.. rest-controller:: ceilometer.api.controllers.v2.samples:SamplesController
   :webprefix: /v2/samples

.. autotype:: ceilometer.api.controllers.v2.samples.Sample
   :members:

.. autotype:: ceilometer.api.controllers.v2.meters.Statistics
   :members:

When a simple statistics request is invoked (using GET
/v2/meters/<meter_name>/statistics), it will return the standard set of
*Statistics*: *avg*, *sum*, *min*, *max*, and *count*.

.. note:: If using Ceilometer data for statistics, it's recommended to use a
   backend such as Gnocchi_ rather than Ceilometer's interface. Gnocchi is
   designed specifically for this use case by providing a light-weight,
   aggregated model. As they manage data differently, the API models returned
   by Ceilometer and Gnocchi are different. The Gnocchi API can be found
   here_.

.. _Gnocchi: http://docs.openstack.org/developer/gnocchi/
.. _here: http://docs.openstack.org/developer/gnocchi/rest.html

Selectable Aggregates
+++++++++++++++++++++

The Statistics API has been extended to include the aggregate functions
*stddev* and *cardinality*. You can explicitly select these functions or any
from the standard set by specifying an aggregate function in the statistics
query::

    GET /v2/meters/<meter_name>/statistics?aggregate.func=<name>&aggregate.param=<value>

(where aggregate.param is optional).

Duplicate aggregate function and parameter pairs are silently discarded from
the statistics query. Partial duplicates, in the sense of the same function
but differing parameters, for example::

    GET /v2/meters/<meter_name>/statistics?aggregate.func=cardinality&aggregate.param=resource_id&aggregate.func=cardinality&aggregate.param=project_id

are, on the other hand, both allowed by the API and supported by the storage
drivers. See the :ref:`functional-examples` section for more detail.

.. note:: Currently only *cardinality* needs aggregate.param to be specified.

.. autotype:: ceilometer.api.controllers.v2.meters.Aggregate
   :members:

Capabilities
============

The Capabilities API allows you to directly discover which functions from the
V2 API functionality, including the selectable aggregate functions, are
supported by the currently configured storage driver. A capabilities query
returns a flattened dictionary of properties with associated boolean values -
a 'False' or absent value means that the corresponding feature is not
available in the backend.

.. rest-controller:: ceilometer.api.controllers.v2.capabilities:CapabilitiesController
   :webprefix: /v2/capabilities

.. autotype:: ceilometer.api.controllers.v2.capabilities.Capabilities
   :members:
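For example, you can inspect the configured backend before relying on a
selectable aggregate. A sketch follows; the token is illustrative and the
response is truncated, with key names following the flattened colon-separated
convention described above::

    curl -H 'X-Auth-Token: <token>' "http://localhost:8777/v2/capabilities"

which returns something of the form::

    {"api": {"statistics:aggregation:selectable:stddev": true,
             "statistics:aggregation:selectable:cardinality": true,
             ...},
     "storage": {"production_ready": true},
     "event_storage": {"production_ready": true}}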
Events and Traits
=================

.. rest-controller:: ceilometer.api.controllers.v2.events:EventTypesController
   :webprefix: /v2/event_types

.. rest-controller:: ceilometer.api.controllers.v2.events:TraitsController
   :webprefix: /v2/event_types/(event_type)/traits

.. rest-controller:: ceilometer.api.controllers.v2.events:EventsController
   :webprefix: /v2/events

.. autotype:: ceilometer.api.controllers.v2.events.Event
   :members:

.. autotype:: ceilometer.api.controllers.v2.events.Trait
   :members:

.. autotype:: ceilometer.api.controllers.v2.events.TraitDescription
   :members:

Filtering Queries
=================

Ceilometer's REST API currently supports two types of queries. The Simple
Query functionality provides simple filtering on several fields of the
*Sample* type. Complex Query provides the possibility to specify queries with
logical and comparison operators on the fields of *Sample*.

You may also apply filters based on the values of one or more of the
*resource_metadata* field, which you can identify by using
*metadata.<field>* syntax in either type of query. Note, however, that given
the free-form nature of the *resource_metadata* field, there is no practical
or consistent way to validate the query fields under the *metadata* domain
like it is done for all other fields.

.. note:: The API call will return HTTP 200 OK status for both of the
   following cases: when a query with *metadata.<field>* does not match its
   value, and when *<field>* itself does not exist in any of the records
   being queried.

Simple Query
++++++++++++

Many of the endpoints above accept a query filter argument, which should be a
list of Query data structures. Whatever the endpoint you want to apply a
filter on, you always filter on the fields of the *Sample* type (for example,
if you apply a filter on a query for statistics, you won't target the
*duration_start* field of *Statistics*, but the *timestamp* field of
*Sample*). See :ref:`api-queries` for how to query the API.

.. autotype:: ceilometer.api.controllers.v2.base.Query
   :members:

Event Query
+++++++++++

Event query is similar to simple query; its type, EventQuery, is actually a
subclass of Query, so EventQuery has every attribute Query has. But there are
some differences. If a field is one of the following: event_type, message_id,
start_timestamp, end_timestamp, then this field will be applied on the event;
otherwise it will be treated as a trait name and applied on the trait. See
:ref:`api-queries` for how to query the API.

.. autotype:: ceilometer.api.controllers.v2.events.EventQuery
   :members:

Complex Query
+++++++++++++

The filter expressions of the Complex Query feature operate on the fields of
*Sample*. The following comparison operators are supported: *=*, *!=*, *<*,
*<=*, *>*, *>=* and *in*; and the following logical operators can be used:
*and*, *or* and *not*. The field names are validated against the database
models. See :ref:`api-queries` for how to query the API.

.. note:: The *not* operator has a different meaning in MongoDB and in a SQL
   DB engine. If the *not* operator is applied on a non existent metadata
   field, then the result depends on the DB engine. For example, if the
   {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a
   query, MongoDB will return every Sample object, as the *not* operator
   evaluates to true for every Sample where the given field does not exist.
   See more in the MongoDB doc. On the other hand, a SQL-based DB engine will
   return an empty result, as the join operation on the metadata table will
   return zero rows: the on clause of the join, which tries to match on the
   metadata field name, is never fulfilled.

Complex Query supports defining the list of orderby expressions in the form
of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. The number of the
returned items can be bounded using the *limit* option. The *filter*,
*orderby* and *limit* are all optional fields in a query.

.. rest-controller:: ceilometer.api.controllers.v2.query:QuerySamplesController
   :webprefix: /v2/query/samples

.. autotype:: ceilometer.api.controllers.v2.query.ComplexQuery
   :members:
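A sketch of a complete complex query request follows; the token, meter name
and threshold are illustrative. Note that the *filter* and *orderby* values
are themselves JSON encoded strings inside the request body::

    curl -X POST -H 'X-Auth-Token: <token>' \
      -H 'Content-Type: application/json' \
      -d '{"filter": "{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.5}}]}", "orderby": "[{\"timestamp\": \"desc\"}]", "limit": 10}' \
      "http://localhost:8777/v2/query/samples"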
Links
=====

.. autotype:: ceilometer.api.controllers.v2.base.Link
   :members:

API and CLI query examples
==========================

CLI Queries
+++++++++++

Ceilometer CLI Commands::

    $ ceilometer --debug --os-username <username> --os-password <password> --os-auth-url http://localhost:5000/v2.0/ --os-tenant-name admin meter-list

.. note:: The *username*, *password*, and *tenant-name* options are required
   to be present in these arguments or specified via environment variables.
   Note that the in-line arguments will override the environment variables.

.. _api-queries:

API Queries
+++++++++++

Ceilometer API calls:

.. note:: To successfully query Ceilometer you must first get a
   project-specific token from the Keystone service and add it to any API
   calls that you execute against that project. See the
   `OpenStack credentials documentation `_ for additional details.

A simple query to return a list of available meters::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters"

A query to return the list of resources::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/resources"

A query to return the list of samples, limited to a specific meter type::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/disk.root.size"

A query using filters (see: `query filter section `_)::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.delete.start"

Additional examples::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/disk.root.size?q.field=resource_id&q.op=eq&q.value=<resource_id>"

or::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.exists"

You can specify multiple filters by using an array of queries (order
matters)::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance"\
      "?q.field=metadata.event_type&q.value=compute.instance.exists"\
      "&q.field=timestamp&q.op=gt&q.value=2013-07-03T13:34:17"

A query to find the maximum value and standard deviation (*max*, *stddev*) of
the CPU utilization for a given instance (identified by *resource_id*)::

    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/cpu_util/statistics?aggregate.func=max&aggregate.func=stddev"\
      "&q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980"

.. note:: If any of the requested aggregates are not supported by the storage
   driver, a HTTP 400 error code will be returned along with an appropriate
   error message.

JSON based example::

    curl -X GET -H "X-Auth-Token: <token>" -H "Content-Type: application/json" \
      -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}]}' \
      http://localhost:8777/v2/meters/instance

JSON based example with multiple filters::

    curl -X GET -H "X-Auth-Token: <token>" -H "Content-Type: application/json" \
      -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}, {"field": "resource_id", "op": "eq", "value": "4da2b992-0dc3-4a7c-a19a-d54bf918de41"}]}' \
      http://localhost:8777/v2/meters/instance

.. _functional-examples:

Functional examples
+++++++++++++++++++

The examples below are meant to help you understand how to query the
Ceilometer API to build custom meter reports. The query parameters should be
encoded using one of the above methods, e.g. as the URL parameters or as JSON
encoded data passed to the GET request.
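The query parameters in the functional examples are shown unencoded for
readability. When issuing one of them as URL parameters, you can let a tool
such as curl perform the encoding (``-G`` forces a GET with the
``--data-urlencode`` values appended to the URL); the sketch below, with an
illustrative token, sends a single timestamp condition against the instance
meter::

    curl -G -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance" \
      --data-urlencode "q.field=timestamp" \
      --data-urlencode "q.op=ge" \
      --data-urlencode "q.value=2013-06-01T00:00:00"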
Get the list of samples about instances running for June 2013::

    GET /v2/meters/instance
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}]

Get the list of samples about instances running for June 2013 for a
particular project::

    GET /v2/meters/instance
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]

Now you may want to have statistics on the meters you are targeting. Consider
the following example where you are getting the list of samples about CPU
utilization of a given instance (identified by its *resource_id*) running for
June 2013::

    GET /v2/meters/cpu_util
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}]

You can have statistics on the list of samples requested (*avg*, *sum*,
*max*, *min*, *count*) computed on the full duration::

    GET /v2/meters/cpu_util/statistics
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}]

You may want to aggregate samples over a given period (10 minutes for
example) in order to get an array of the statistics computed on smaller
durations::

    GET /v2/meters/cpu_util/statistics
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}]
    period: 600

The *period* parameter aggregates by time range. You can also aggregate by
field using the *groupby* parameter. Currently, the *user_id*, *resource_id*,
*project_id*, and *source* fields are supported. Below is an example that
uses a query filter and group by aggregation on *project_id* and
*resource_id*::

    GET /v2/meters/instance/statistics
    q: [{"field": "user_id", "op": "eq", "value": "user-2"},
        {"field": "source", "op": "eq", "value": "source-1"}]
    groupby: ["project_id", "resource_id"]

The statistics will be returned in a list, and each entry of the list will be
labeled with the group name. For the previous example, the first entry might
have *project_id* be "project-1" and *resource_id* be "resource-1", the
second entry have *project_id* be "project-1" and *resource_id* be
"resource-2", and so on.

You can request both period and group by aggregation in the same query::

    GET /v2/meters/instance/statistics
    q: [{"field": "source", "op": "eq", "value": "source-1"}]
    groupby: ["project_id"]
    period: 7200

Note that period aggregation is applied first, followed by group by
aggregation. Order matters because the period aggregation determines the time
ranges for the statistics.
Below is a real-life query::

    GET /v2/meters/image/statistics
    groupby: ["project_id", "resource_id"]

With the return values::

    [{"count": 4, "duration_start": "2013-09-18T19:08:33", "min": 1.0,
      "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
      "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1137.0,
      "period_start": "2013-09-18T19:08:33", "avg": 1.0,
      "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
                  "resource_id": "551f495f-7f49-4624-a34c-c422f2c5f90b"},
      "unit": "image"},
     {"count": 4, "duration_start": "2013-09-18T19:08:36", "min": 1.0,
      "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
      "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1134.0,
      "period_start": "2013-09-18T19:08:36", "avg": 1.0,
      "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
                  "resource_id": "7c1157ed-cf30-48af-a868-6c7c3ad7b531"},
      "unit": "image"},
     {"count": 4, "duration_start": "2013-09-18T19:08:34", "min": 1.0,
      "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
      "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1136.0,
      "period_start": "2013-09-18T19:08:34", "avg": 1.0,
      "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
                  "resource_id": "eaed9cf4-fc99-4115-93ae-4a5c37a1a7d7"},
      "unit": "image"}]

You can request specific aggregate functions as well. For example, if you
only want the average CPU utilization, the GET request would look like
this::

    GET /v2/meters/cpu_util/statistics?aggregate.func=avg

Use the same syntax to access aggregate functions outside the standard
set, e.g. *stddev* and *cardinality*. A request for the standard
deviation of the CPU utilization would take the form::

    GET /v2/meters/cpu_util/statistics?aggregate.func=stddev

and would give a response such as::

    [{"aggregate": {"stddev": 0.6858829535841072},
      "duration_start": "2014-01-30T11:13:23",
      "duration_end": "2014-01-31T16:07:13",
      "duration": 104030.0,
      "period": 0,
      "period_start": "2014-01-30T11:13:23",
      "period_end": "2014-01-31T16:07:13",
      "groupby": null,
      "unit": "%"}]

The request syntax is similar for *cardinality*, but with the
*aggregate.param* option also provided. So, for example, if you want to
know the number of distinct tenants with images, you would do::

    GET /v2/meters/image/statistics?aggregate.func=cardinality
      &aggregate.param=project_id
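Note that the two-line pseudo-request above is a single URL; the
parameters are split only for readability. As an illustrative sketch
(placeholder token, third-party ``requests`` library, and the assumption
that the selected aggregate comes back keyed as
``cardinality/<aggregate.param>``, as in the response shown further
below), the same query can be issued programmatically::

    import requests

    params = [("aggregate.func", "cardinality"),
              ("aggregate.param", "project_id")]
    stats = requests.get("http://localhost:8777/v2/meters/image/statistics",
                         headers={"X-Auth-Token": "<token>"},
                         params=params).json()
    # the requested aggregate is returned under the "aggregate" key,
    # e.g. {"cardinality/project_id": 3.0}
    print(stats[0]["aggregate"])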
For a more involved example, consider a requirement for determining, for
some tenant, the number of distinct instances (*cardinality*) as well as
the total number of instance samples (*count*). You might also want to
see this information in 15 minute long intervals. Then, using the
*period* and *groupby* options, such a query would look like the
following::

    GET /v2/meters/instance/statistics?aggregate.func=cardinality
      &aggregate.param=resource_id
      &aggregate.func=count
      &groupby=project_id&period=900

This would give an example response of the form::

    [{"count": 19,
      "aggregate": {"count": 19.0, "cardinality/resource_id": 3.0},
      "duration": 328.478029,
      "duration_start": "2014-01-31T10:00:41.823919",
      "duration_end": "2014-01-31T10:06:10.301948",
      "period": 900,
      "period_start": "2014-01-31T10:00:00",
      "period_end": "2014-01-31T10:15:00",
      "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
      "unit": "instance"},
     {"count": 22,
      "aggregate": {"count": 22.0, "cardinality/resource_id": 4.0},
      "duration": 808.00384,
      "duration_start": "2014-01-31T10:15:15",
      "duration_end": "2014-01-31T10:28:43.003840",
      "period": 900,
      "period_start": "2014-01-31T10:15:00",
      "period_end": "2014-01-31T10:30:00",
      "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
      "unit": "instance"},
     {"count": 2,
      "aggregate": {"count": 2.0, "cardinality/resource_id": 2.0},
      "duration": 0.0,
      "duration_start": "2014-01-31T10:35:15",
      "duration_end": "2014-01-31T10:35:15",
      "period": 900,
      "period_start": "2014-01-31T10:30:00",
      "period_end": "2014-01-31T10:45:00",
      "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
      "unit": "instance"}]

If you want to retrieve all the instances (the resources themselves, not
the list of samples) that have been run during this month for a given
project, you should ask the resource endpoint for the list of resources
(of all types: storage, images, networking, ...)::

    GET /v2/resources
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]

Then look for resources that have an *instance* meter linked to them:
those are the resources that have been measured as instances. You can
then request their samples to get more detailed information, like their
state or their flavor::

    GET /v2/meters/instance
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"},
        {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]

This will return a list of samples that have been recorded on this
particular resource. You can inspect them to retrieve information such as
the instance state (check the *metadata.vm_state* field) or the instance
flavor (check the *metadata.flavor* field). You can request nested
metadata fields by using a dot to delimit the fields (e.g.
*metadata.weighted_host.host* for the *instance.scheduled* meter).

To retrieve only the last 3 samples of a meter, you can pass the *limit*
parameter to the query::

    GET /v2/meters/instance
    q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
        {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
        {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"},
        {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]
    limit: 3

This query would only return the last 3 samples.
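The two-step workflow above is easy to script. Below is an illustrative
sketch (placeholder token and project id, third-party ``requests``
library), assuming each resource representation carries a ``links`` list
whose ``rel`` values name the meters attached to that resource::

    import requests

    BASE = "http://localhost:8777"
    HEADERS = {"X-Auth-Token": "<token>"}  # placeholder

    q = [("q.field", "project_id"), ("q.op", "eq"),
         ("q.value", "<project_id>")]
    resources = requests.get(BASE + "/v2/resources",
                             headers=HEADERS, params=q).json()

    for res in resources:
        # keep only resources carrying an "instance" meter link
        if not any(link.get("rel") == "instance"
                   for link in res.get("links", [])):
            continue
        samples = requests.get(
            BASE + "/v2/meters/instance",
            headers=HEADERS,
            params=[("q.field", "resource_id"), ("q.op", "eq"),
                    ("q.value", res["resource_id"]), ("limit", "3")]).json()
        for s in samples:
            print(res["resource_id"], s["resource_metadata"].get("vm_state"))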
Functional example for Complex Query
++++++++++++++++++++++++++++++++++++

This example demonstrates how complex query filter expressions can be
generated and sent to the /v2/query/samples endpoint of the Ceilometer
API using a POST request.

To check for *cpu_util* samples reported between 18:00-18:15 or between
18:30-18:45 on a particular date (2013-12-01), where the utilization is
between 23 and 26 percent, but not exactly 25.12 percent, the following
filter expression can be created::

    {"and":
      [{"and":
         [{"=": {"counter_name": "cpu_util"}},
          {">": {"counter_volume": 0.23}},
          {"<": {"counter_volume": 0.26}},
          {"not": {"=": {"counter_volume": 0.2512}}}]},
       {"or":
         [{"and":
            [{">": {"timestamp": "2013-12-01T18:00:00"}},
             {"<": {"timestamp": "2013-12-01T18:15:00"}}]},
          {"and":
            [{">": {"timestamp": "2013-12-01T18:30:00"}},
             {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]}

Different sorting criteria can be defined for the query filter: for
example, the results can be ordered in ascending order by the
*counter_volume* and in descending order by the *timestamp*. The
following order-by expression specifies these criteria::

    [{"counter_volume": "ASC"}, {"timestamp": "DESC"}]

As the current implementation accepts only string values for the query
filter and order-by definitions, the expressions defined above have to
be converted to string values. Adding a limit criteria to the request,
which caps the number of returned samples at four, the query looks like
the following::

    {"filter" : "{\"and\":[{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.23}}, {\"<\": {\"counter_volume\": 0.26}}, {\"not\": {\"=\": {\"counter_volume\": 0.2512}}}]}, {\"or\": [{\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:00:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:15:00\"}}]}, {\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:30:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:45:00\"}}]}]}]}",
     "orderby" : "[{\"counter_volume\": \"ASC\"}, {\"timestamp\": \"DESC\"}]",
     "limit" : 4}

A query request looks like the following with curl::

    curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \
      -d '<json_body_described_above>' \
      http://localhost:8777/v2/query/samples
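Hand-escaping the filter string is error-prone. A minimal illustrative
sketch (placeholder token, third-party ``requests`` library) builds the
same request body by serializing the filter and order-by structures with
``json.dumps``::

    import json

    import requests

    flt = {"and": [
        {"and": [{"=": {"counter_name": "cpu_util"}},
                 {">": {"counter_volume": 0.23}},
                 {"<": {"counter_volume": 0.26}},
                 {"not": {"=": {"counter_volume": 0.2512}}}]},
        {"or": [{"and": [{">": {"timestamp": "2013-12-01T18:00:00"}},
                         {"<": {"timestamp": "2013-12-01T18:15:00"}}]},
                {"and": [{">": {"timestamp": "2013-12-01T18:30:00"}},
                         {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]}
    orderby = [{"counter_volume": "ASC"}, {"timestamp": "DESC"}]

    # the endpoint expects "filter" and "orderby" as JSON-encoded strings
    body = {"filter": json.dumps(flt),
            "orderby": json.dumps(orderby),
            "limit": 4}
    resp = requests.post("http://localhost:8777/v2/query/samples",
                         headers={"X-Auth-Token": "<token>",
                                  "Content-Type": "application/json"},
                         data=json.dumps(body))
    print(resp.json())

Serializing with ``json.dumps`` guarantees correct escaping no matter how
deeply the filter expression is nested.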
.. _user-defined-data:

User-defined data
+++++++++++++++++

It is possible to add your own samples (created from data retrieved in
any way, such as monitoring agents on your instances) to Ceilometer, to
store them and query on them. You can even get *Statistics* on your own
inserted data. By adding a *Sample* to a *Resource*, you automatically
create the corresponding *Meter* if it does not exist already.

To achieve this, you have to POST a list of one to many samples in JSON
format::

    curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \
      -d '<json_list_of_samples>' \
      http://localhost:8777/v2/meters/<meter_name>

The fields *source*, *timestamp*, *project_id* and *user_id* are
automatically added if not present in the samples. The field
*message_id* is not taken into account if present, and an internal value
will be set instead.

By default, samples posted via the API will be placed on the
notification bus and processed by the notification agent. To avoid
re-queuing the data, samples posted via the API can be stored directly
in the storage backend verbatim by specifying the boolean flag 'direct'
in the request URL, like this::

    POST /v2/meters/ram_util?direct=True

Samples posted this way will bypass pipeline processing.

Here is an example showing how to add a sample for a *ram_util* meter
(whether or not it already exists)::

    POST /v2/meters/ram_util
    body: [
        {
          "counter_name": "ram_util",
          "user_id": "4790fbafad2e44dab37b1d7bfc36299b",
          "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b",
          "resource_metadata": {
            "display_name": "my_instance",
            "my_custom_metadata_1": "value1",
            "my_custom_metadata_2": "value2"
          },
          "counter_unit": "%",
          "counter_volume": 8.57762938230384,
          "project_id": "97f9a6aaa9d842fcab73797d3abb2f53",
          "counter_type": "gauge"
        }
    ]

You get back the same list, completed with the missing fields: *source*
and *timestamp* in this case.
ceilometer-6.1.5/doc/source/testing.rst0000664000567000056710000000531413072744703021302 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost)

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

=================
Running the Tests
=================

Ceilometer includes an extensive set of automated unit tests which are
run through tox_.

1. Install ``tox``::

   $ sudo pip install tox

2. On Ubuntu install the ``mongodb`` and ``libmysqlclient-dev`` packages::

   $ sudo apt-get install mongodb
   $ sudo apt-get install libmysqlclient-dev

   For Fedora 20 there is no ``libmysqlclient-dev`` package, so you'll
   need to install ``mariadb-devel.x86_64`` (or ``mariadb-devel.i386``)
   instead::

   $ sudo yum install mongodb
   $ sudo yum install mariadb-devel.x86_64

3. Install the test dependencies::

   $ sudo pip install -r /opt/stack/ceilometer/test-requirements.txt

4. Run the unit and code-style tests::

   $ cd /opt/stack/ceilometer
   $ tox -e py27,pep8

   As tox is a wrapper around testr, it also accepts the same flags as
   testr. See the `testr documentation`_ for details about these
   additional flags.

   .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html

   Use a double hyphen to pass options to testr. For example, to run
   only tests under tests/api/v2::

      $ tox -e py27 -- api.v2

   To debug tests (i.e. to break into the pdb debugger), you can use the
   ``debug`` tox environment. Here's an example, passing the name of a
   test, since you'll normally only want to run the test that hits your
   breakpoint::

      $ tox -e debug ceilometer.tests.test_bin

   For reference, the ``debug`` tox environment implements the
   instructions here:
   https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests

5. There is a growing suite of tests which use a tool called `gabbi`_ to
   test and validate the behavior of the Ceilometer API. These tests are
   run when using the usual ``py27`` tox target, but if desired they can
   be run by themselves::

   $ tox -e gabbi

   The YAML files used to drive the gabbi tests can be found in
   ``ceilometer/tests/gabbi/gabbits``. If you are adding to or adjusting
   the API you should consider adding tests here.

.. _gabbi: https://gabbi.readthedocs.org/

.. seealso::

   * tox_
.. _tox: http://tox.testrun.org/latest/
ceilometer-6.1.5/doc/source/4-Transformer.png0000664000567000056710000012235613072744703022252 0ustar jenkinsjenkins00000000000000[binary PNG image data for 4-Transformer.png not reproduced]
ceilometer-6.1.5/doc/source/measurements.rst0000664000567000056710000000212213072744706022332 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network (DreamHost)

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _measurements:

==============
 Measurements
==============

Existing meters
===============

For the list of existing meters see the tables under the
`Measurements page`_ of Ceilometer in the Cloud Administrator Guide.

.. _Measurements page: http://docs.openstack.org/admin-guide-cloud/telemetry-measurements.html

Adding new meters
=================

If you would like to add new meters please check the
:ref:`add_new_meters` page in the Contributing section.
ceilometer-6.1.5/doc/source/overview.rst0000664000567000056710000000377713072744706021501 0ustar jenkinsjenkins00000000000000========
Overview
========

Objectives
==========

The Ceilometer project was started in 2012 with one simple goal in mind:
to provide an infrastructure to collect any information needed regarding
OpenStack projects. It was designed so that rating engines could use this
single source to transform events into billable items, which we label
"metering".

As the project started to come to life, collecting an `increasing number
of meters`_ across multiple projects, the OpenStack community started to
realize that a secondary goal could be added to Ceilometer: becoming a
standard way to collect meters, regardless of the purpose of the
collection. For example, Ceilometer can now publish information for
monitoring, debugging and graphing tools, in addition or in parallel to
the metering backend. We labelled this effort "multi-publisher".

.. _increasing number of meters: http://docs.openstack.org/developer/ceilometer/measurements.html

Metering
========

If you divide a billing process into a 3 step process, as is commonly
done in the telco industry, the steps are:

1. :term:`Metering`
2. :term:`Rating`
3. :term:`Billing`

Ceilometer's initial goal was, and still is, strictly limited to step
one. This is a choice made from the beginning not to go into rating or
billing, as the variety of possibilities seemed too large for the
project to ever deliver a solution that would fit everyone's needs, from
private to public clouds. This means that if you are looking at this
project to solve your billing needs, this is the right way to go, but
certainly not the end of the road for you. Once Ceilometer is in place
on your OpenStack deployment, you will still have several things to do
before you can produce a bill for your customers. One of your first
tasks could be finding the right queries within the Ceilometer API to
extract the information you need for your very own rating engine.
2)u{ä⾋çWÍþzæ·ÓM(”OO_çr²hV§G­²œõ÷ÿæýøçDŽa+ÏpÙÞË7­ždD¡d……fâ´‘s[±åûaC|–®ŸÑîÃ…ºGqðî‹ÉÅ—Nå9û÷Ý(ÅwöJƒøXÚÚC"îZÔÿ@"!¼‡*'gfeG$¡PÑ#)]**ÊÁ˧©¡‘÷4… ÎCPìUë º/&b•¨*í¿¹x+ðáß¾?ÌM3íñÍ«÷í¿óMUY¥àÙ­óº÷¶TwúÏ'˜˜YÆä¢êCß©½BÓ2Ö‚?Œím5ä(t mø£¢öȸmº´ƒÓàÍmÜIþ—]¯(³ 6¦˜B)¿?}ˆƒ‘»c×Íï(ÖÇÈt)T„…$L¦íl“““Á“Ž+;þD¢!…) ‚ò»@Qa¢ÉFy `v–…ºSÏ5e‘¨…b˜[s\fa?¤©þèßÓ2D?%¹¹¹:Oª%f¨ F.ƒfÃ?`6Ù š(êôç£<þ^µïÍyÍßG&˜f\6ûPz]8iò4ΟrrÃMVmß©þù娪bW3 vU%×NÔp'¯Ú*¤:pë–5ž?TeÃv"ÈYO Áþ€r°ã"ÃÃÃ!M¥`=°"€Hˆ`‡Hs«š ‰ÎÝÒÉ0`¶eÈAJ<°ÁGEE5Õí=M!Côó\2 O\) Z™rrÒÈŽ:CVß+àzÕÐu¬M€Ð°*ŠS‚¢àœÍ8Ÿ~]:èW$åˆl!]o²^GäÀî,»$îet7tЫ"F×´k¯J¡”(ÛöèãâêbL+cÉ«©)Iç~6¯^à=RZ œ@šA2†@ x"6ÓŽ8—«f]fvòöþzþèÆÙ!0;ùÔ‡z¦oâÈKIh4È lžœ’!Ò<¥’Îs‚§(xR(¸›—Æ6Bõ]cy£^ôË+3Ïx2zÕÒ9«§÷™°â)‹¢ÞwbOs[C˜(öôÞóÇ×ÿü{”X“nÜ|ÏÉò#è¾oÞ’½;¾ÿþû[å¹.êUÛˆjÕn F™PØo¾ÿqÓÎSÛæÎ?vþšËÙB’+Aei®¦Žd€MHHh®!á®CIyÈ¢I8±P D >ËãêÕ«¶£ÅÀÀ&8÷Àã°Å#H¸c€ÙZÿ÷†c,ÝT÷ê€YVî³-ß}eéÛRVÖS¿=[Ì1CÀìT8óÕ_Oe§îdÓyÕñð´Çëæ8k:YOY{-‹I©J?6 Úôùþü?Þ£íÕ,L&~ç—TÎC9˜Ý1³ç [u'Kó‰‹v…Ö¼àòhIÀS°¡Éb2!30OÙ÷ð„…'á)«¦¡¥¦¡)-YéVówùï2 £Â‡÷®œ½û8^ÑqøÌ÷6 3µœºmž»;lÏß{ß9þ~íÏ]JŸø~^Xß7™ŸäF“— /¸±{`žn§ÿþ1Ù¢>í-®ž+NžìªöïÏ[¶ßSðXµãøw¶ÒÙæâèalÞ¾”óò8¶-² íåË—ÿÙº¶È"3ÊÙ6cj@@ÀÖ­[[F} sð{H°.•ü®-[2 ˜mnÜ^†æíà—¦ÞEÑ¿§5ØJ­/<±à°ut’šœ‰åÔg||Æ÷…Ðê³ðTìÂÚó}Ÿ¾úÜÈ7iâçÓ¦ãã?ÿaöÍ«·ßpþ`çWŸSs˜»ÇIíœ_¨ºët§*ZOZsnÒšzM¤ö‡‘¹U|d|Ì\\\¤&„s+É—WTìÚ¹sé²evvç"$‚Ø\RpÓéê³råJ]]]¡fNOO‹‘'åiŽê€Ùsó7Ý…€Ùk?Êi·ïãê1zÔô½,U¸³>êÎÝL)n/ßú§:`¶«a€Ùñze·zM©˜£‘se†o:'`¶]€Y¹~êOnL>Í ˜eͱÒ>êÌzª±{VÜ ØÌ ˜9T¶(| ªp77/99th,w? ¸Ùle~½ô9ösîºKYs#V¨Z}ȵpmPZKc.õÖê“’)V)z†¦d f[±ZPÝRU]3%5•çÈ{xÂBô“PÌ DÔÕÇÁDY)È”¢p×…(2 &‡µ™ê¢I6H}C [Iwg½ñ_™µ€ú|úÄI¦¡Sý!%ÕÁ ˜ýeí[—Þ8ð 8esf?œŸ³jß­¦‹ÿ¾töè7ú¬Þ!Ò€Ù=goæþ;÷Ÿ·”f[¶dðœ•ù wGÞÓœá>_Iø1nÛêÍ,ÆÛ@BT\o^ÜÏ- )ˆš¦6yhàh«ÔGðµÆ–RD`poÃPŸšE2ñ ˜mí]î\°• _„¼§1&$8k uÚUÔÔI k›>fQ©²’”Q¦H}ÚÌ}JzE[@}€÷Àk ©2bÀlkoTm}Ž!Œkêk0òžÖ‚+•þ`é…r¤â›º2öc,,×>ÌãfO.{³|ìÌEâ›OØ‘+?<¸QJ¡?ò™øÝaÂã…IˆöÕ+’••%D"5EêC¤Õ@Yø! õt…ð…©¥ËIüN¢f[¹XÚz0Bfffãq0ž«1&D?µHËÊÊLÅÉ{Eý7ÿÛå¶®#ñ²ºWeÞñ;¥Ùª‚õ?~¶?QÖ‹ËDáÖÄ*’‘òàRŸ½gâ1‹”ë×–„êê^¸“Òl„„¾Â~‡º§© ˜mÕr©krVœ§ <òžV!+•ÎÙÙÙ0¯š¦í=0>ÝzöBÓ#ÛÎÍÚ7òVMvQð±e‹ýÓè´2¦Å¬£?Znš}À¸—aZbD”∥“ïEĆôܽ÷'w5VʽM þ}WÌ.)7³mÓ"§œcS¾;óyHÞh¼ïž¥Z~™søuA%ÓÐó7ßåó'ï2èa˜“ñ‘aÿóÖM£•Œ™xeåùÝýTÊ‚ÿ¶ÆöÔͱF4Jå‡K¿nyóJeÍzýõýö|snɹý”‡Ó¦ð”ä{ËW¿ÕÃU]l&Nõêáù1“Ê}Ò²I‘ú´ 7ì%y¤>ܤºéxä•©Á7óüBFÞC¾•.*âì8‰ß¹G³çÊooNßðtÄa×Ï ± Òä=wûNvQÍ9=ßk[įìÂTõq¾[Åüä5ùêˆGçÈ]^8êhÔŠÞvÏ~Ø_¸ôð¥!šï÷Œ^rnؽ¯g_¸e¼¾ì‚ÛÓöæ,9zkŒjôŽŸÝËAe¤kxûííL<0ÒçtÌzÍk;Ê[\êu‚ÑãÏõì©9Mã)É¿¯ÊŠa#®ä†*ªj wuÀŸàСC÷õáæD©»$Üß‘ú4ÆD†Ï_ºt©©› ŠóŒÊÙ¡>Ü'Ÿùì=Áž¼bPÁä>—@ÞÓ¢Ÿá>YÁ¯YÜ‚ÒM¼¿sàçýo» ©™Š¦i¡¶nùÄryZ^TžI)KIÕ®—™"ENÝPÇBÃNSŽZ®£Ie”³ËRž'=‰]8j3d",g”vͪ¤4$Ð&R½W7M9 ­ãò¿ÿ¡?<¯nïaÍQËÌÑ$ïä'ñ‡M«,ÇK’ìø§±Í‰ÑôB^QRŠaØ…„ŸPE|ÏÕUéŠ M 9ª›ƒåËXO©s{)¿‘úˆw‚ åÁBGS[(˜`ÒÕˆ£H§h©Ø[ñˆäh–úp³¨Cv{bª&a©êg•ðä’žŽc› œ^N®žy¤W¢õóqŸ¬Šâç=Š’íœMŽc~¼doW]½2é¿EG™ëþ;ã©U|c‰ç¿pêËýD¥}¾³8ÎÐTª¢Õ¿ã m?ßa}'®>÷yŸ‹nàµg>´ãV]¯…MázRÃi* u-jê°7_×”§$Äh=üMŽ ¤Ìá= £¶ÅÄ!æ=;‹w;²IiZ}©O«!$Ó«gwêC&‰ëÈÊŸúp¿0•«Í±x´)À:®]p¨«×cÌbsvhSèJVYnY%%±Û{@-ªF¿‹ÕÎm{ZÄá#ÌâÌbu[k5Zeú}¿¨"fÓå¶”L{wȽuûÐœŠø«¿ýý¶PÞrÖÙ³‚/Õü <°¼»•«mîÃW¹,JEÜÎųö'1ÙŒð1°‰Wû:I§“©"UI›Ã©Â—šQM™˜ùÉ1I%l9(ûÎâR$~à+š4£ù>üÆã{MQI®ƒË9ßVdºˆ^dZ­¶-+Ÿ/.ï‘€¼m¯µWª&»%% C~‘÷qµøË…f8 ª«¬ˆÿ ŽþmœbB‡u(ÛLš©ylÌŒñv¥yÏr ß¾áZ6oîCÕø×*‡ ˇõ™ø÷{ƒkðüEÁjÔ?G}¾TüB7»jŸg»ˆ{hôùöòÛú]§ùMƒ3n/>VŸŸvë{Ô÷„ßê%iVŒú´æ/99Á}™[ nxµ4ì"xnxÁcO^^B{$%’¼‘»Ó»õ†_ßùówYUνËþŒUÖ,Ë yüõït{•‚€Ñ3/Øõ3/‰ LµÚ]>7)ìâô#;ûÉ¿:¼ò·PºvU¾|¯µ{¦uR­ûÊX•ÿìð÷[ÂX´âR›éÛ~ïúÜkæys7k¥¢¸hÕ‡æ•-X•±ýß…¶¬w«gírx¿gõüìÜK#ë4;òãpjÁó:³ì™8íë :VÊ63~öÜP;þý•ë 3òãÔÙlû˜WåD¿—Ÿá·nHå]^>9à0–uõXÂgí¦h\ÿmñþl+]ÕÜwsN®Roº†J‰o]¸d·®ïw.‰Ý âS­ÍÌ}²R9þ/x º¼<±‘4H}D$#vSøÂ”WPûÄ_&`ËO]ã´oã¶{²ãžf2f³Ž,ùʬðòèùg¢¼×9S©l¦Æèùzeì迦r¼ß«ð<ÏDZ…þôÈé ïâöìðõ³6Ÿsðzïw3µÆª®hÓÓôÅ ÷ã‡gšú=Ï.¥ÒXêã–m¬’¼wöÂÓ)3xk)W¿Yòà¥Ô“ug¹Òk“!çý랯ò÷~á~ìóø…Ië6»Ük«Liè¼õ_É¿ýnúÎ÷%#†ößÃBiál×ô;:G–×h⤾-qì©#>&~ƒ§m«L®7Ýå!'¦éKäñE«¶Á7DÞÃûF!òY:]6WMuÀiÿD¾yÙ„·7?&AZ4 >‘ Å@#ЀúpÂy$ý©`ê½uÂÂ{^ìT-UA1ûæúwU©ŸâK-¸®‰t 
xO™‚Eìl‹­{ðäI•ïy4Ž¡ñ¾¥§AÃÀêº:È~ý¦ÒåèXX†Ø_þÿíVVÿ<`nxýLvë‘×E­JtÌÌÊ,##1Qêßù"Ñÿ¯K*c]Ñÿ5uŽU-ïu‰V¨o£gmóÖ­ð<ü2Jü?š•’‚¸«W`Ó©ômmñ&:Zþ‚ šŠ”ˆ‡ˆ8zQÿ2'"¢÷<̨ l]±FÆŠaoc??`늒‡Ç·ù·( =`€Ìõýúa¥Ÿ.\»&sýˆ·|³1}äÈRËš5lXøá$- pbÓ¦2_/Þ6­œQçßæBÑ8ŸôêUj]«¦M‘xá‚Ô²KEƒ‚}6x°ÌòF¹»cóþý½u«JçÝÄÞw>ĬիñÍܹ°45•lÛ®eKD¾:x¸K÷3n`m-ùyXŸ>2×¥–¨÷ªÞ/òÜ3µ!((ʃ†hPÝž 6r@|9ïQo#ÔÒ’k;õJŒÉ ¦© ev¥/o's›¬ìJ­«És¬jyò ØZ\£~ý–.m%3«ÈÜ®?\ß²µÌõòÎ>CDDôÞ†•šÕÆÂ?y{cÜ×_cäüù€ßV®„U9ߨ”hZÆÃ©Ø“2¾iô–ÎìJOY¦Uô!¬øà’ɯ^aËþýø÷êUÄ$$HÆ©ÈÍ«ú(áϊα|uRÔì¶x+‘âš-‰¯ÒyoZ¶ ƒ<=ñW@üƒ‚àÒ¢º·k·víàêì 5µê½em‹¾ÓPW—º—d­+9hUïyî™Úpýúu¾C*9}cT®¤ÿî¨IcØuëZ¥0C”™ u]]ì89EátuefA]WjZZÉè%ÔÖ’CMªîs¬É:+E (5 iYö郿þ†‚¢îºDDD 3Þ!÷®]ajd„¤”˜¡OçÎ Räü@”ZFË#ƒr_§+Ç·6OccÑ{òdãËhRÚå|“WÞºê¼_ä¹gj“Žž!·ìÀwK%clf ½zÆrmË@ãýõ$8MF£~ýpïÀ¤ÆÆ•»½YË–h?wþÛ·_j¬‡ÔØX7m };[$ÿW½Óž§'ÄC]·êÙÛ#IÆÀ×õí ·+'L¯Õ}Ž5Yg%Yµk33Ä„„⌗W™Û¹­ZÛ.®°jß±¡—ù BDD 3Þµ7")%†úúHJIÁŠñͼyr¿^WK ¯ÓÒsæ êééUxÿ‚ýl+cÉúõxž”;KK,Ÿ1[·†‘Ô…B¨©ªB¿}ûª= ji!5=)¯_ÃÔÈèíÛkjâMz:Ò33%cYÈz ×•³ÙlyÌML°röl¬œ=‘QQ p0.\»†™«V!''SЦz•GN±¾Ñ5¡ª÷KuÝ35v~úFhëêªÛh¼Ÿ’nßAüµk°hÓn+W!hád¾H‘¹­QãÆèº|´ML`Ü´©T˜~ ÆM›¢ùœ[¾¼ôÃtûöh7k&žý7Š .-„›·`ب¸Ë 3Šf1I¸u»F몪ç(PUEA±–•5YgeÕ‘x°Ô²< €mW4êןa)<•ºvB!7n`ßÂÒÔçwí‚™±16îÝ+óA ‹šü?|ö¬ÖÎC<ƒÿÏ?cD¿~°³´„®¶6Ô…BÄTqjR¨_4 Øƒ3l”Å®hû{Ë\¿h¹]%w+÷˜¦zxàÔ–-Xïí øeÏž2À’ÓÉ(5‹HuS„û…¨º Îròþ¹ôí·È|ñõ6À mÛÐbÔHèÛÙAUCúú0iÖ ífÍDߟ×CÛÄ/""p}ëé÷Ù#G›•…únÝÑeñ"èÛØ@EM ZÆFhòñGè¶l)ômm!¬D8qø0òsóààîçÏ'CÏÚ ªз±Ó¤IppwG~n"Ž©Ñzªì9æµX¬ïÖ*jjÐ44¬ñ:+NC_6;C”žŽè‹ËÝ6æR²SSaëÚêúúüå ""†ïJFV¦ûø ???~ý5ì­­±vÁ©éYeÉÍÍ•úw¢Vëwí’¹}à¥Kp2>7ÖØ¹dç䬊 €)¶fëVÉÃ{e[¸:N ·çXé¹àoGDÀÄÕ='L(µýöC‡d–·«èCdg'§*÷ÄÅ‹áЯŸÌ™ZÄrÆÉèz£WÔ…åæƒ¥Ö­Û¹³Fï;E¸_ˆhPe¥'$âÔÌYxñà4 ÐfêT|¼sFŸ>…G£¿ßF8 5MMD_¸€€Ù_”43->—¾ùù¹¹hЫ>þcÆbØ_¡ÃܹêèàEDnn«x ƒ×Ïž!ܯðý³å˜1¼{7FŸ>…ÿ؅ƶ( ߸¯k8¸®ì9¾¸_øÿR×%K0&(Ãýÿ®ñ:+®Aï^PUWdzÿE^vùƒæ‹Dˆ:{*B!öú¿DDÄ0ã]YúóψŒŠÂÐ>}0 {wÀ޽ѿkW<ŠŽÆÒŸ–Ú^<&‚p0rD"$¾x˜4t(´55ጉ‹#2* 9"â““±õàAŒóòBdT”dŽšÐ¸~}À² üê²²³vçF/\ˆÔ´4É̇ÿùG|T(42***Øuô(¾Û¶ I/_"#+ çÃÃ1aÑ"deg£C«V’í'5UUì:rË7lÀã˜dfe!2* >7bÇáÃPSUÅä¢iN+«  ñÉɘ°hN]¸€W©©ÈÍÍų¸8ÉõkáàPêuâe^ëÖáÎÇÈÎÉAlBæ~û-®ÜºÃü†Iî"T¥õçÏqbê4œ[¾ÏΞEZ|NBzBòE"äffâŃ¸æ· §fÌDNZåî£ûýÀ¹ó‚ìׯ‘Ÿ›‡¬—/}áN1÷ÿö'õT™s ýá{$Þº…ܬ,䤥KQ“u&Ö¨h¶¬'Armÿèta÷¡Fýû󃈈š  äÔ E¦N èÓ¦ ú¹¸¼³ҭľÒÂÂp><îÓ¦ÁØÀW€iQ3N p† —#ž™‰›6¡kÛ¶€þS§â|Ñ¥ÅË€¿1iɈJ´ÚsrtÄñM›`Plüñ±—5KEÖï;y“–,)µ¹9‚·mÃÒ_~Á¾“'K÷ÛöQÜ;wbñúõ2×96h€€_•˜Òoï^,\»Vö$à» 0ÍãJçý,.nãÇ#)EvŸmM ø¯_/¹†bOŸÆøE‹Jm¯.â¯Ä4Ä&$ õêU‚r©2ë*s¿TôzU§Saa(š¹`óæÍ2·ùî»ï sëF8ê ¾[¾‡þ=ñ"ÿ»°¶¶.w ñý¢ýæ š<ŒdåÕ!¡¯,\¸BD ¡N´ÌHÏÌÄtàÛùó¥‚  pŠÍå3f   Ó}|^4Xå^^puv†¶¦&ôutàÒ¢…ä5Cz÷ÆÅ?þÀ˜akau¡:ZZpnÖ ¾_|àmÛJ=˜V'þý±váB8ØÙA¨¦ssŒýè#ýö¬ÍÍá=e œ¡.VzZÏ9ãÆÁýz|ر# õõ!TSCkkÌ7ÿüþ{©6¦‰ã~~è×¥ LêÕƒšª*LŒ0ÈÍ '7o–dTT}++\Ú½ó>û Ž @OGêB!ì,-1fà@œß¹³TÃúöÅ–+ðA“&ÐÔÐ@===tsqÁ‘ УCèµÂÉ|KÛʪíû…¨&°…)*…k™AD5-3¨"äi¡Á–DDu[f‘"RaQyØBƒˆˆˆˆ à ""z+DDDD¤Hf‘\h‘¢`˜ADDr+hlß¾•BDDDDïà ""ª´ŒŒ V½s 3ˆˆHn%g6ñôôd¥Ñ;Ç0ƒˆˆä"Ï­DDDDDïà ""z+DDDD¤HfQ¹d‘¢a˜ADDebADDDDŠˆaIÙ´ot]\XDÄ ƒˆˆˆˆà ’rýÞ=V1È """"…Æ0ƒ¤0Ì "DDDD¤èfԲ˷naœ— €Q§N°vsC‡‘#á»y3’^¾$¿zãΡëâ‚›÷ï—YÖƒ§O¡ëâãÎñâÕ+¹Êû~ûv躸à¿Gº..ÐuqÁÁÓ§%Û\¹}c¿ú úö…aÇŽhЧ<æÍÃùðp™Çó&- º..p:°÷Ä ´÷ð€©«+Ú Š]GŽH¶ýÝß.#FÀ¸sg44ßþö xƒ1È """"*EUP{ã3ooäååI–åˆDx‰»‘‘Ø~èÎnßkss|ܳ'öŸ:…GŽàGG™åí9v ðI¯^8.wÙòøãèQÌXµJª¼¤”?w'ΟÇÚ 0ÕÃCê5š€Ì¬,ü€ÉK—JÖE<{†é>>°45ÅÝÈH,úé'ɺ¨çϱÒφúú˜2|8o"DDDDD 3ÅJ??äååaÞgŸaÒС°45Efv6.^»†ß}‡¨çÏáãç‡ÍË—câ!ØêöŸ<‰Õ_|! 
ceilometer-6.1.5/doc/Makefile0000664000567000056710000001402313072744706017233 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " wadl to build a WADL file for api.openstack.org" clean: -rm -rf $(BUILDDIR)/* html: check-dependencies $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: check-dependencies check-dependencies: @python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1) @ld -ltidy >/dev/null 2>&1 || (echo "ERROR: Missing libtidy dependencies. Please install libtidy with the system package manager" && exit 1) wadl: $(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl @echo @echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ceilometer.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ceilometer.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Ceilometer" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ceilometer" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." 
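# A typical invocation of the Makefile above, as a brief sketch: "make html"
# runs the check-dependencies target first, which probes for the
# sphinxcontrib-httpdomain Python package and for libtidy. The libtidy package
# name below is an assumption and varies by distribution:
#
#   pip install sphinxcontrib-httpdomain   # satisfies the Sphinx dependency probe
#   apt-get install libtidy-dev            # assumed system package providing libtidy for `ld -ltidy'
#   make html                              # HTML output lands in $(BUILDDIR)/html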
ceilometer-6.1.5/releasenotes/0000775000567000056710000000000013072745164017516 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/0000775000567000056710000000000013072745164021016 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/index.rst0000664000567000056710000000020613072744706022656 0ustar jenkinsjenkins00000000000000========================= Ceilometer Release Notes ========================= .. toctree:: :maxdepth: 1 liberty unreleased ceilometer-6.1.5/releasenotes/source/conf.py0000664000567000056710000002170213072744706022320 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Ceilometer Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer Release Notes' copyright = u'2015, Ceilometer Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from ceilometer.version import version_info as ceilometer_version # The full version, including alpha/beta/rc tags. release = ceilometer_version.version_string_with_vcs() # The short X.Y version. version = ceilometer_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CeilometerReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CeilometerReleaseNotes.tex', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometerreleasenotes', u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CeilometerReleaseNotes', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'CeilometerReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False ceilometer-6.1.5/releasenotes/source/_static/0000775000567000056710000000000013072745164022444 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/_static/.placeholder0000664000567000056710000000000013072744703024713 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/_templates/0000775000567000056710000000000013072745164023153 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/_templates/.placeholder0000664000567000056710000000000013072744703025422 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/source/unreleased.rst0000664000567000056710000000016013072744706023675 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. 
release-notes:: ceilometer-6.1.5/releasenotes/source/liberty.rst0000664000567000056710000000022213072744706023217 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ceilometer-6.1.5/releasenotes/notes/0000775000567000056710000000000013072745164020646 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml0000664000567000056710000000055313072744706030366 0ustar jenkinsjenkins00000000000000 --- fixes: - > [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurrence of a meter will be used and any following duplicates will be skipped from processing. ceilometer-6.1.5/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml0000664000567000056710000000054413072744706031444 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutually exclusively, rather than capturing both always. other: - > Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in the configuration file. ceilometer-6.1.5/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml0000664000567000056710000000066413072744703030454 0ustar jenkinsjenkins00000000000000--- critical: - > [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. ceilometer-6.1.5/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml0000664000567000056710000000033213072744703027605 0ustar jenkinsjenkins00000000000000--- upgrade: - > Run db-sync to add new indices. fixes: - > [`bug 1526793 `_] Additional indices were added to better support querying of event data. ceilometer-6.1.5/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml0000664000567000056710000000034113072744703027725 0ustar jenkinsjenkins00000000000000--- critical: - > [`bug 1519767 `_] fnmatch functionality in Python <= 2.7.9 is not threadsafe. This issue and its potential race conditions are now patched. ceilometer-6.1.5/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml0000664000567000056710000000037413072744703031403 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. ceilometer-6.1.5/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml0000664000567000056710000000024513072744703027115 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1254800 `_] Add better support to catch race conditions when creating event_types ceilometer-6.1.5/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml0000664000567000056710000000117413072744703030543 0ustar jenkinsjenkins00000000000000--- features: - > Support for polling Neutron's LBaaS v2 API was added, as the v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. issues: - > Neutron API is not designed to be polled against. 
When polling against Neutron is enabled, Ceilometer's polling agents may generate a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. upgrade: - > By default, Ceilometer will poll the v2 API. To poll the legacy v1 API, add the neutron_lbaas_version=v1 option to the configuration file. ceilometer-6.1.5/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml0000664000567000056710000000037313072744703027205 0ustar jenkinsjenkins00000000000000--- features: - > Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in the Kilo cycle. upgrade: - > The audit middleware in the keystonemiddleware library should be used for similar support. ceilometer-6.1.5/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml0000664000567000056710000000033513072744703030500 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load ceilometer-6.1.5/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml0000664000567000056710000000010313072744703026057 0ustar jenkinsjenkins00000000000000--- features: - > Add support for Keystone v3 authentication ceilometer-6.1.5/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml0000664000567000056710000000052413072744703026417 0ustar jenkinsjenkins00000000000000--- features: - > Support for CORS is added. More information can be found [`here `_] upgrade: - > The api-paste.ini file can be modified to include or exclude the CORS middleware. Additional configurations can be made to the middleware as well. ceilometer-6.1.5/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml0000664000567000056710000000054113072744703031555 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a KeyError and aborts. Instead we should skip the meter with the missing field and continue with the rest of the definitions. ceilometer-6.1.5/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml0000664000567000056710000000041213072744703031217 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1506959 `_] Add support to query the unique set of meter names rather than meters associated with each resource. The list is available by adding the unique=True option to the request. ceilometer-6.1.5/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml0000664000567000056710000000032213072744703027730 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi. This functionality requires Gnocchi v2.1.0 to be installed. ceilometer-6.1.5/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml0000664000567000056710000000054513072744703033472 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with the rest of the definitions. ceilometer-6.1.5/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml0000664000567000056710000000063513072744703031207 0ustar jenkinsjenkins00000000000000--- features: - > To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. 
If the resource_update_interval option is configured in the compute section, the compute agent will discover new instances based on the defined interval. The agent will continue to poll the discovered instances at the interval defined by the pipeline. ceilometer-6.1.5/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml0000664000567000056710000000023713072744703027733 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1388680 `_] Support ability to query for None value when using SQL backend. ceilometer-6.1.5/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml0000664000567000056710000000022113072744703030372 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml ceilometer-6.1.5/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml0000664000567000056710000000035113072744703027310 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1550436 `_] Cache JSON parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. ceilometer-6.1.5/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml0000664000567000056710000000020713072744703027070 0ustar jenkinsjenkins00000000000000--- features: - > Ceilometer alarms code is now fully removed from the code base. Equivalent functionality is handled by Aodh. ceilometer-6.1.5/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml0000664000567000056710000000021713072744703030042 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector ceilometer-6.1.5/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml0000664000567000056710000000060013072744703031245 0ustar jenkinsjenkins00000000000000--- upgrade: - > To utilize the new policy support, the policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. fixes: - > [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. ceilometer-6.1.5/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml0000664000567000056710000000025213072744703032324 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. ceilometer-6.1.5/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml0000664000567000056710000000026713072744703026642 0ustar jenkinsjenkins00000000000000--- features: - > Gnocchi dispatcher now uses the client rather than direct HTTP requests upgrade: - > gnocchiclient library is now a requirement if using ceilometer+gnocchi. ceilometer-6.1.5/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml0000664000567000056710000000012713072744703026571 0ustar jenkinsjenkins00000000000000--- features: - > Remove eventlet from Ceilometer in favour of a threaded approach ceilometer-6.1.5/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml0000664000567000056710000000100613072744703026733 0ustar jenkinsjenkins00000000000000--- features: - > Add support for batch processing of messages from the queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. upgrade: - > batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of the configuration. The batch_size controls the number of messages to grab before processing. 
Similarly, the batch_timeout defines the wait time before processing. ceilometer-6.1.5/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml0000664000567000056710000000031213072744703026637 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. ceilometer-6.1.5/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml0000664000567000056710000000111013072744703026645 0ustar jenkinsjenkins00000000000000--- critical: - > The previous defaults for the `requeue_sample_on_dispatcher_error' and `requeue_event_on_dispatcher_error' configuration options made it very easy to lose data: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way. upgrade: - > The options `requeue_event_on_dispatcher_error' and `requeue_sample_on_dispatcher_error' have been removed; requeuing on dispatcher error is now always enabled. ceilometer-6.1.5/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml0000664000567000056710000000051013072744703027700 0ustar jenkinsjenkins00000000000000--- features: - > RPC collector support is dropped. The queue-based notifier publisher and collector were added as the recommended alternative as of the Icehouse cycle. upgrade: - > Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. ceilometer-6.1.5/releasenotes/notes/.placeholder0000664000567000056710000000000013072744703023115 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml0000664000567000056710000000031413072744703031356 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previously set to an incorrect type. It should be a gauge meter. ceilometer-6.1.5/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml0000664000567000056710000000061213072744703030363 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from the nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. ceilometer-6.1.5/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml0000664000567000056710000000050613072744703031306 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to the appropriate type to handle large data. ceilometer-6.1.5/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml0000664000567000056710000000023613072744703030122 0ustar jenkinsjenkins00000000000000--- upgrade: - > gnocchi_resources.yaml in Ceilometer should be updated. fixes: - > Fix samples from Heat to map to the correct Gnocchi resource type ceilometer-6.1.5/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml0000664000567000056710000000052513072744703026346 0ustar jenkinsjenkins00000000000000--- features: - > Support resource caching in Gnocchi dispatcher to improve write performance by avoiding additional queries. other: - > A dogpile.cache supported backend is required to enable cache. 
Additional configuration `options `_ are also required. ceilometer-6.1.5/functions.sh0000664000567000056710000000115213072744706017371 0ustar jenkinsjenkins00000000000000function clean_exit(){ local error_code="$?" rm -rf "$1" kill $(jobs -p) return $error_code } check_for_cmd () { if ! which "$1" >/dev/null 2>&1 then echo "Could not find $1 command" 1>&2 exit 1 fi } wait_for_line () { exit_code=1 while read line do echo "$line" | grep -q "$1" && exit_code=0 && break done < "$2" # Read the fifo for ever otherwise process would block cat "$2" >/dev/null & if [ $exit_code -eq 1 ]; then echo "Entries of \"$1\" have not been found. Now tests will be stopped." exit $exit_code fi } ceilometer-6.1.5/.coveragerc0000664000567000056710000000014213072744703017141 0ustar jenkinsjenkins00000000000000[run] branch = True source = ceilometer omit = ceilometer/tests/* [report] ignore_errors = True ceilometer-6.1.5/.mailmap0000664000567000056710000000370613072744703016452 0ustar jenkinsjenkins00000000000000# Format is: # # Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan ceilometer-6.1.5/tools/0000775000567000056710000000000013072745164016165 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/tools/lintstack.py0000775000567000056710000001433413072744706020544 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """pylint error checking.""" from __future__ import print_function import json import re import sys from pylint import lint from six.moves import cStringIO as StringIO # noqa # These variables will be useful if we will need to skip some pylint checks ignore_codes = [] ignore_messages = [] ignore_modules = [] KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" class LintOutput(object): _cached_filename = None _cached_content = None def __init__(self, filename, lineno, line_content, code, message, lintoutput): self.filename = filename self.lineno = lineno self.line_content = line_content self.code = code self.message = message self.lintoutput = lintoutput @classmethod def from_line(cls, line): m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] 
(.*)", line) matched = m.groups() filename, lineno, code, message = (matched[0], int(matched[1]), matched[2], matched[-1]) if cls._cached_filename != filename: with open(filename) as f: cls._cached_content = list(f.readlines()) cls._cached_filename = filename line_content = cls._cached_content[lineno - 1].rstrip() return cls(filename, lineno, line_content, code, message, line.rstrip()) @classmethod def from_msg_to_dict(cls, msg): """From the output of pylint msg, to a dict. Each key is a unique error identifier, value is a list of LintOutput """ result = {} for line in msg.splitlines(): obj = cls.from_line(line) if obj.is_ignored(): continue key = obj.key() if key not in result: result[key] = [] result[key].append(obj) return result def is_ignored(self): if self.code in ignore_codes: return True if any(self.filename.startswith(name) for name in ignore_modules): return True if any(msg in self.message for msg in ignore_messages): return True return False def key(self): if self.code in ["E1101", "E1103"]: # These two types of errors are like Foo class has no member bar. # We discard the source code so that the error will be ignored # next time another Foo.bar is encountered. return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" "%(code)s: %(message)s" % { "filename": self.filename, "lineno": self.lineno, "line_content": self.line_content, "code": self.code, "message": self.message, }) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print("# automatically generated by tools/lintstack.py", file=output) for i in sorted(errors.keys()): print(json.dumps(i), file=output) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO() # pylint writes its report to stdout unless given an explicit reporter, # so direct it into the buffer; otherwise buff stays empty. from pylint.reporters import text args = ["--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}", "-E", "ceilometer"] lint.Run(args, reporter=text.TextReporter(buff), exit=False) val = buff.getvalue() buff.close() return val def generate_error_keys(msg=None): print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def validate(newmsg=None): print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print("Unique errors reported by pylint: was %d, now %d." % (len(known), len(errors))) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print(err.lintoutput) print() passed = False if passed: print("Congrats! pylint check passed.") redundant = known - set(errors.keys()) if redundant: print("Extra credit: some known pylint exceptions disappeared.") for i in sorted(redundant): print(json.dumps(i)) print("Consider regenerating the exception file if you will.") else: print("Please fix the errors above. 
If you believe they are false" " positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def usage(): print("""Usage: tools/lintstack.py [generate|validate] To generate pylint_exceptions file: tools/lintstack.py generate To validate the current commit: tools/lintstack.py """) def main(): option = "validate" if len(sys.argv) > 1: option = sys.argv[1] if option == "generate": generate_error_keys() elif option == "validate": validate() else: usage() if __name__ == "__main__": main() ceilometer-6.1.5/tools/ceilometer-test-event.py0000775000567000056710000000463013072744706022772 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool to help you debug your event definitions. Feed it a list of test notifications in json format, and it will show you what events will be generated. """ import json import sys from oslo_config import cfg from stevedore import extension from ceilometer.event import converter from ceilometer import service cfg.CONF.register_cli_opts([ cfg.StrOpt('input-file', short='i', help='File to read test notifications from.' ' (Containing a json list of notifications.)' ' defaults to stdin.'), cfg.StrOpt('output-file', short='o', help='File to write results to. Defaults to stdout.'), ]) TYPES = {1: 'text', 2: 'int', 3: 'float', 4: 'datetime'} service.prepare_service() output_file = cfg.CONF.output_file input_file = cfg.CONF.input_file if output_file is None: out = sys.stdout else: out = open(output_file, 'w') if input_file is None: notifications = json.load(sys.stdin) else: with open(input_file, 'r') as f: notifications = json.load(f) out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file) out.write("Notifications tested: %s\n" % len(notifications)) event_converter = converter.setup_events( extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) for notification in notifications: event = event_converter.to_event(notification) if event is None: out.write("Dropped notification: %s\n" % notification['message_id']) continue out.write("Event: %s at %s\n" % (event.event_type, event.generated)) for trait in event.traits: dtype = TYPES[trait.dtype] out.write(" Trait: name: %s, type: %s, value: %s\n" % ( trait.name, dtype, trait.value)) ceilometer-6.1.5/tools/show_data.py0000775000567000056710000000710113072744706020513 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg import six from ceilometer import storage def show_users(db, args): for u in sorted(db.get_users()): print(u) def show_resources(db, args): if args: users = args else: users = sorted(db.get_users()) for u in users: print(u) for resource in db.get_resources(user=u): print(' %(resource_id)s %(timestamp)s' % resource) for k, v in sorted(six.iteritems(resource['metadata'])): print(' %-10s : %s' % (k, v)) for meter in resource['meter']: totals = db.get_statistics(storage.SampleFilter( user=u, meter=meter['counter_name'], resource=resource['resource_id'], )) # FIXME(dhellmann): Need a way to tell whether to use # max() or sum() by meter name without hard-coding. if meter['counter_name'] in ['cpu', 'disk']: value = totals[0]['max'] else: value = totals[0]['sum'] print(' %s (%s): %s' % (meter['counter_name'], meter['counter_type'], value)) def show_total_resources(db, args): if args: users = args else: users = sorted(db.get_users()) for u in users: print(u) for meter in ['disk', 'cpu', 'instance']: stats = db.get_statistics(storage.SampleFilter( user=u, meter=meter, )) # get_statistics returns a sequence of statistics (cf. show_resources # above), so take the first entry. if meter in ['cpu', 'disk']: total = stats[0]['max'] else: total = stats[0]['sum'] print(' ', meter, total) def show_raw(db, args): fmt = ' %(timestamp)s %(counter_name)10s %(counter_volume)s' for u in sorted(db.get_users()): print(u) for resource in db.get_resources(user=u): print(' ', resource['resource_id']) for sample in db.get_samples(storage.SampleFilter( user=u, resource=resource['resource_id'], )): print(fmt % sample) def show_help(db, args): print('COMMANDS:') for name in sorted(COMMANDS.keys()): print(name) def show_projects(db, args): for u in sorted(db.get_projects()): print(u) COMMANDS = { 'users': show_users, 'projects': show_projects, 'help': show_help, 'resources': show_resources, 'total_resources': show_total_resources, 'raw': show_raw, } def main(argv): extra_args = cfg.CONF( sys.argv[1:], # NOTE(dhellmann): Read the configuration file(s) for the # ceilometer collector by default. default_config_files=['/etc/ceilometer/ceilometer.conf'], ) db = storage.get_connection_from_config(cfg.CONF) command = extra_args[0] if extra_args else 'help' COMMANDS[command](db, extra_args[1:]) if __name__ == '__main__': main(sys.argv) ceilometer-6.1.5/tools/make_test_event_data.py0000775000567000056710000000733213072744706022712 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating event test data for Ceilometer. Usage: Generate testing data, e.g. 
for default time span source .tox/py27/bin/activate ./tools/make_test_event_data.py --event_types 3 """ import argparse import datetime import logging import random import sys import uuid from oslo_config import cfg from oslo_utils import timeutils from ceilometer.event.storage import models from ceilometer import storage def make_test_data(conn, start, end, interval, event_types): # Compute start and end timestamps for the new data. if isinstance(start, datetime.datetime): timestamp = start else: timestamp = timeutils.parse_strtime(start) if not isinstance(end, datetime.datetime): end = timeutils.parse_strtime(end) increment = datetime.timedelta(minutes=interval) print('Adding new events') n = 0 while timestamp <= end: data = [] for i in range(event_types): traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())), models.Trait('id2_%d' % i, 2, random.randint(1, 10)), models.Trait('id3_%d' % i, 3, random.random()), models.Trait('id4_%d' % i, 4, timestamp)] data.append(models.Event(str(uuid.uuid4()), 'event_type%d' % i, timestamp, traits, {})) n += 1 conn.record_events(data) timestamp = timestamp + increment print('Added %d new events' % n) def main(): cfg.CONF([], project='ceilometer') parser = argparse.ArgumentParser( description='generate event data', ) parser.add_argument( '--interval', default=10, type=int, help='The period between events, in minutes.', ) parser.add_argument( '--start', default=31, type=int, help='The number of days in the past to start timestamps.', ) parser.add_argument( '--end', default=2, type=int, help='The number of days into the future to continue timestamps.', ) parser.add_argument( '--event_types', default=3, type=int, help='The number of unique event_types.', ) args = parser.parse_args() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) # Connect to the event database conn = storage.get_connection_from_config(cfg.CONF, 'event') # Compute the correct time span start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start) end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end) make_test_data(conn=conn, start=start, end=end, interval=args.interval, event_types=args.event_types) if __name__ == '__main__': main() ceilometer-6.1.5/tools/send_test_data.py0000775000567000056710000001101613072744706021523 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for sending test data for Ceilometer via oslo.messaging. 
Usage: Send messages with samples generated by make_test_data source .tox/py27/bin/activate ./tools/send_test_data.py --samples-count 1000 --resources-count 10 --topic metering """ import argparse import datetime import functools import json import random import uuid import make_test_data from oslo_config import cfg import oslo_messaging from six import moves from ceilometer import messaging from ceilometer.publisher import utils from ceilometer import service def send_batch_notifier(notifier, topic, batch): notifier.sample({}, event_type=topic, payload=batch) def get_notifier(config_file): service.prepare_service(argv=['/', '--config-file', config_file]) return oslo_messaging.Notifier( messaging.get_transport(), driver='messagingv2', publisher_id='telemetry.publisher.test', topic='metering', ) def generate_data(send_batch, make_data_args, samples_count, batch_size, resources_count, topic): make_data_args.interval = 1 make_data_args.start = (datetime.datetime.utcnow() - datetime.timedelta(minutes=samples_count)) make_data_args.end = datetime.datetime.utcnow() make_data_args.resource_id = None resources_list = [str(uuid.uuid4()) for _ in moves.xrange(resources_count)] resource_samples = {resource: 0 for resource in resources_list} batch = [] count = 0 for sample in make_test_data.make_test_data(**make_data_args.__dict__): count += 1 resource = resources_list[random.randint(0, len(resources_list) - 1)] resource_samples[resource] += 1 sample['resource_id'] = resource # need to change the timestamp from datetime.datetime type to iso # format (unicode type), because collector will change iso format # timestamp to datetime.datetime type before recording to db. sample['timestamp'] = sample['timestamp'].isoformat() # need to recalculate signature because of the resource_id change sig = utils.compute_signature(sample, cfg.CONF.publisher.telemetry_secret) sample['message_signature'] = sig batch.append(sample) if len(batch) == batch_size: send_batch(topic, batch) batch = [] if count == samples_count: send_batch(topic, batch) return resource_samples send_batch(topic, batch) return resource_samples def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( '--batch-size', dest='batch_size', type=int, default=100 ) parser.add_argument( '--config-file', default='/etc/ceilometer/ceilometer.conf' ) parser.add_argument( '--topic', default='perfmetering' ) parser.add_argument( '--samples-count', dest='samples_count', type=int, default=1000 ) parser.add_argument( '--resources-count', dest='resources_count', type=int, default=100 ) parser.add_argument( '--result-directory', dest='result_dir', default='/tmp' ) return parser def main(): args = get_parser().parse_known_args()[0] make_data_args = make_test_data.get_parser().parse_known_args()[0] notifier = get_notifier(args.config_file) send_batch = functools.partial(send_batch_notifier, notifier) result_dir = args.result_dir del args.config_file del args.result_dir resource_writes = generate_data(send_batch, make_data_args, **args.__dict__) result_file = "%s/sample-by-resource-%s" % (result_dir, random.getrandbits(32)) with open(result_file, 'w') as f: f.write(json.dumps(resource_writes)) return result_file if __name__ == '__main__': main() ceilometer-6.1.5/tools/make_test_data.py0000775000567000056710000001647113072744706021511 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except
in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating test data for Ceilometer. Usage: Generate testing data, e.g. for the default time span source .tox/py27/bin/activate ./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util --volume 20 """ import argparse import datetime import logging import random import sys import uuid from oslo_config import cfg from oslo_utils import timeutils from ceilometer.publisher import utils from ceilometer import sample from ceilometer import storage def make_test_data(name, meter_type, unit, volume, random_min, random_max, user_id, project_id, resource_id, start, end, interval, resource_metadata=None, source='artificial'): resource_metadata = resource_metadata or {'display_name': 'toto', 'host': 'tata', 'image_ref': 'test', 'instance_flavor_id': 'toto', 'server_group': 'toto', } # Compute start and end timestamps for the new data. if isinstance(start, datetime.datetime): timestamp = start else: timestamp = timeutils.parse_strtime(start) if not isinstance(end, datetime.datetime): end = timeutils.parse_strtime(end) increment = datetime.timedelta(minutes=interval) print('Adding new samples for meter %s.' % (name)) # Generate samples n = 0 total_volume = volume while timestamp <= end: if (random_min >= 0 and random_max >= 0): # If there is a random element defined, we will add it to # the user-given volume. if isinstance(random_min, int) and isinstance(random_max, int): total_volume += random.randint(random_min, random_max) else: total_volume += random.uniform(random_min, random_max) c = sample.Sample(name=name, type=meter_type, unit=unit, volume=total_volume, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=timestamp.isoformat(), resource_metadata=resource_metadata, source=source, ) data = utils.meter_message_from_counter( c, cfg.CONF.publisher.telemetry_secret) # timestamp should be string when calculating signature, but should be # datetime object when calling record_metering_data. data['timestamp'] = timestamp yield data n += 1 timestamp = timestamp + increment if (meter_type == 'gauge' or meter_type == 'delta'): # For delta and gauge, we don't want to increase the value # in time by random element. So we always set it back to # volume. total_volume = volume print('Added %d new samples for meter %s.'
% (n, name)) def record_test_data(conn, *args, **kwargs): for data in make_test_data(*args, **kwargs): conn.record_metering_data(data) def get_parser(): parser = argparse.ArgumentParser( description='generate metering data', ) parser.add_argument( '--interval', default=10, type=int, help='The period between samples, in minutes.', ) parser.add_argument( '--start', default=31, help='Number of days to be stepped back from now or date in the past (' '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.', ) parser.add_argument( '--end', default=2, help='Number of days to be stepped forward from now or date in the ' 'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end ' 'range.', ) parser.add_argument( '--type', choices=('gauge', 'cumulative'), default='gauge', dest='meter_type', help='Counter type.', ) parser.add_argument( '--unit', default=None, help='Counter unit.', ) parser.add_argument( '--project', dest='project_id', help='Project id of owner.', ) parser.add_argument( '--user', dest='user_id', help='User id of owner.', ) parser.add_argument( '--random_min', help='The lower bound of the random amount added to the given volume.', type=int, default=0, ) parser.add_argument( '--random_max', help='The upper bound of the random amount added to the given volume.', type=int, default=0, ) parser.add_argument( '--resource', dest='resource_id', default=str(uuid.uuid4()), help='The resource id for the meter data.', ) parser.add_argument( '--counter', default='instance', dest='name', help='The counter name for the meter data.', ) parser.add_argument( '--volume', help='The amount to attach to the meter.', type=int, default=1, ) return parser def main(): cfg.CONF([], project='ceilometer') args = get_parser().parse_args() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) # Connect to the metering database conn = storage.get_connection_from_config(cfg.CONF) # Find the user and/or project for a real resource if not (args.user_id or args.project_id): for r in conn.get_resources(): if r.resource_id == args.resource_id: args.user_id = r.user_id args.project_id = r.project_id break # Compute the correct time span format = '%Y-%m-%dT%H:%M:%S' try: start = datetime.datetime.utcnow() - datetime.timedelta( days=int(args.start)) except ValueError: try: start = datetime.datetime.strptime(args.start, format) except ValueError: raise try: end = datetime.datetime.utcnow() + datetime.timedelta( days=int(args.end)) except ValueError: try: end = datetime.datetime.strptime(args.end, format) except ValueError: raise args.start = start args.end = end record_test_data(conn=conn, **args.__dict__) return 0 if __name__ == '__main__': main() ceilometer-6.1.5/tools/pretty_tox.sh0000775000567000056710000000065213072744703020746 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 # --until-failure is not compatible with --subunit see: # # https://bugs.launchpad.net/testrepository/+bug/1411804 # # this workaround exists until that is addressed if [[ "$TESTRARGS" =~ "until-failure" ]]; then python setup.py testr --slowest --testr-args="$TESTRARGS" else python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi ceilometer-6.1.5/tools/lintstack.sh0000775000567000056710000000414413072744706020524 0ustar
jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright (c) 2012-2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Use lintstack.py to compare pylint errors. # We run pylint twice, once on HEAD, once on the code before the latest # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) # Get the current branch name. GITHEAD=`git rev-parse --abbrev-ref HEAD` if [[ "$GITHEAD" == "HEAD" ]]; then # In detached head mode, get revision number instead GITHEAD=`git rev-parse HEAD` echo "Currently we are at commit $GITHEAD" else echo "Currently we are at branch $GITHEAD" fi cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then # The HEAD is a Merge commit. Here, the patch to review is # HEAD^2, the master branch is at HEAD^1, and the patch was # written based on HEAD^2~1. PREV_COMMIT=`git rev-parse HEAD^2~1` git checkout HEAD~1 # The git merge is necessary for reviews with a series of patches. # If not, this is a no-op so won't hurt either. git merge $PREV_COMMIT else # The HEAD is not a merge commit. This won't happen on gerrit. # Most likely you are running against your own patch locally. # We assume the patch to examine is HEAD, and we compare it against # HEAD~1 git checkout HEAD~1 fi # First generate tools/pylint_exceptions from HEAD~1 $TOOLS_DIR/lintstack.head.py generate # Then use that as a reference to compare against HEAD git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions ceilometer-6.1.5/tools/test_hbase_table_utils.py0000775000567000056710000000254713072744706023263 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
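# Usage sketch (the storage URL below is an illustrative assumption, not a
# shipped default): main() below reads its connection details from the
# CEILOMETER_TEST_STORAGE_URL and CEILOMETER_TEST_HBASE_TABLE_PREFIX
# environment variables, so an invocation could look like:
#
#   CEILOMETER_TEST_STORAGE_URL=hbase://127.0.0.1:9090 \
#   CEILOMETER_TEST_HBASE_TABLE_PREFIX=test \
#   python tools/test_hbase_table_utils.py --upgrade
#
# Pass --clear instead of --upgrade to empty the metering and event tables.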
import os import sys from oslo_config import cfg from ceilometer import storage def main(argv): cfg.CONF([], project='ceilometer') if os.getenv("CEILOMETER_TEST_STORAGE_URL", "").startswith("hbase://"): url = ("%s?table_prefix=%s" % (os.getenv("CEILOMETER_TEST_STORAGE_URL"), os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"))) conn = storage.get_connection(url, 'ceilometer.metering.storage') event_conn = storage.get_connection(url, 'ceilometer.event.storage') for arg in argv: if arg == "--upgrade": conn.upgrade() event_conn.upgrade() if arg == "--clear": conn.clear() event_conn.clear() if __name__ == '__main__': main(sys.argv[1:]) ceilometer-6.1.5/tools/__init__.py0000664000567000056710000000000013072744703020262 0ustar jenkinsjenkins00000000000000ceilometer-6.1.5/tools/make_test_data.sh0000775000567000056710000000273713072744706021503 0ustar jenkinsjenkins00000000000000#!/bin/bash bindir=$(dirname $0) project_name="$1" if [ -z "$project_name" ] then project_name=demo fi if [ -z "$OS_USERNAME" ] then user=demo else user=$OS_USERNAME fi # Convert a possible project name to an id, if we have # keystone installed. if which keystone >/dev/null then project=$(keystone tenant-list | grep " $project_name " | cut -f2 -d'|' | cut -f2 -d' ') else # Assume they gave us the project id as argument. project="$project_name" fi if [ -z "$project" ] then echo "Could not determine project id for \"$project_name\"" 1>&2 exit 1 fi early1="2012-08-27T07:00:00" early2="2012-08-27T17:00:00" start="2012-08-28T00:00:00" middle1="2012-08-28T08:00:00" middle2="2012-08-28T18:00:00" middle3="2012-08-29T09:00:00" middle4="2012-08-29T19:00:00" end="2012-08-31T23:59:00" late1="2012-08-31T10:00:00" late2="2012-08-31T20:00:00" mkdata() { ${bindir}/make_test_data.py --project "$project" \ --user "$user" --start "$2" --end "$3" \ --resource "$1" --counter instance --volume 1 } dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2) echo $project for i in $(seq 0 $((${#dates[@]} - 2)) ) do iname=${dates[$i]} eval "ivalue=\$$iname" for j in $(seq $((i + 1)) $((${#dates[@]} - 1)) ) do jname=${dates[$j]} eval "jvalue=\$$jname" resource_id="${project_name}-$iname-$jname" echo "$resource_id" mkdata "$resource_id" "$ivalue" "$jvalue" [ $? -eq 0 ] || exit $? done echo done ceilometer-6.1.5/requirements.txt0000664000567000056710000000336213072744706020316 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
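# As a quick sketch of how this file is consumed (the virtualenv path is
# illustrative, not mandated by the project), pip installs the pinned entries
# below in their order of appearance:
#
#   virtualenv .venv && . .venv/bin/activate
#   pip install -r requirements.txt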
retrying!=1.3.0,>=1.2.3 # Apache-2.0 jsonpath-rw-ext>=0.1.9 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT kafka-python<1.0.0,>=0.9.5 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD msgpack-python>=0.4.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 PasteDeploy>=1.5.0 # MIT pbr>=1.6 # Apache-2.0 pecan>=1.0.0 # BSD oslo.messaging>=4.0.0,<=5.17.1 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 pysnmp<5.0.0,>=4.2.3 # BSD python-ceilometerclient>=2.2.1 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,<3.0.0,>=1.6.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0,<7.0.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 PyYAML>=3.1.0 # MIT requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 tooz>=1.28.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3,<1.7.0 # MIT WSME>=0.8 # MIT # NOTE(jd) We do not import it directly, but WSME datetime string parsing # behaviour changes when this library is installed python-dateutil>=2.4.2 # BSD ceilometer-6.1.5/devstack/0000775000567000056710000000000013072745164016631 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/devstack/README.rst0000664000567000056710000000060613072744706020323 0ustar jenkinsjenkins00000000000000=============================== Enabling Ceilometer in DevStack =============================== 1. Download Devstack:: git clone https://git.openstack.org/openstack-dev/devstack cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer 3. Run ``stack.sh``. ceilometer-6.1.5/devstack/plugin.sh0000664000567000056710000004676513072744706020500 0ustar jenkinsjenkins00000000000000# Install and start **Ceilometer** service in devstack # # To enable Ceilometer in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer # # By default all ceilometer services are started (see # devstack/settings). To disable a specific service use the # disable_service function. # # NOTE: Currently, there are two ways to get the IPMI based meters in # OpenStack. One way is to configure Ironic conductor to report those meters # for the nodes managed by Ironic and to have Ceilometer notification # agent to collect them. Ironic by default does NOT enable that reporting # functionality. So in order to do so, users need to set the option of # conductor.send_sensor_data to true in the ironic.conf configuration file # for the Ironic conductor service, and also enable the # ceilometer-anotification service. If you do this, disable the IPMI # polling agent: # # disable_service ceilometer-aipmi # # The other way is to use Ceilometer ipmi agent only to get the IPMI based # meters.
To avoid duplicated meters, users need to make sure to set the # option of conductor.send_sensor_data to false in the ironic.conf # configuration file if the node on which Ceilometer ipmi agent is running # is also managed by Ironic. # # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # # CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. # CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') # CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. # CEILOMETER_EVENTS: Set to True to enable event collection # CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # TODO(liusheng) Temporarily add this to avoid integration test failure, see bug1548634 export SERVICE_TENANT_NAME=$SERVICE_PROJECT_NAME # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # Test if any Ceilometer services are enabled # is_ceilometer_enabled function is_ceilometer_enabled { [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 return 1 } function ceilometer_service_url { echo "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" } # _ceilometer_install_mongodb - Install mongodb and python lib. function _ceilometer_install_mongodb { # Server package is the same on all local packages=mongodb-server if is_fedora; then # mongodb client packages="${packages} mongodb" fi install_package ${packages} if is_fedora; then restart_service mongod else restart_service mongodb fi # give time for service to restart sleep 5 } # _ceilometer_install_redis() - Install the redis server and python lib.
function _ceilometer_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis restart_service redis fi pip_install_gr redis } # Configure mod_wsgi function _ceilometer_config_apache_wsgi { sudo mkdir -p $CEILOMETER_WSGI_DIR local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local apache_version=$(get_apache_version) local venv_path="" # Copy proxy vhost and wsgi file sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages" fi sudo cp $CEILOMETER_DIR/devstack/apache-ceilometer.template $ceilometer_apache_conf sudo sed -e " s|%PORT%|$CEILOMETER_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $ceilometer_apache_conf } # Install required services for coordination function _ceilometer_prepare_coordination { if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif [[ "${CEILOMETER_COORDINATION_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then _ceilometer_install_redis fi } # Install required services for storage backends function _ceilometer_prepare_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then pip_install_gr pymongo _ceilometer_install_mongodb fi if [ "$CEILOMETER_BACKEND" = 'es' ] ; then ${TOP_DIR}/pkg/elasticsearch.sh download ${TOP_DIR}/pkg/elasticsearch.sh install fi } # Install the python modules for inspecting nova virt instances function _ceilometer_prepare_virt_drivers { # Only install virt drivers if we're running nova compute if is_service_enabled n-cpu ; then if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then pip_install_gr libvirt-python fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then pip_install_gr oslo.vmware fi fi } # Create ceilometer related accounts in Keystone function _ceilometer_create_accounts { if is_service_enabled ceilometer-api; then create_service_user "ceilometer" "admin" get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service" get_or_create_endpoint "metering" \ "$REGION_NAME" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" if is_service_enabled swift; then # Ceilometer needs ResellerAdmin role to access Swift account stats. get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME fi fi } # Activities to do before ceilometer has been installed. function preinstall_ceilometer { echo_summary "Preinstall not in virtualenv context. Skipping."
} # Remove WSGI files, disable and remove Apache vhost file function _ceilometer_cleanup_apache_wsgi { if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then sudo rm -f "$CEILOMETER_WSGI_DIR"/* sudo rmdir "$CEILOMETER_WSGI_DIR" sudo rm -f $(apache_site_config_for ceilometer) fi } function _drop_database { if is_service_enabled ceilometer-collector ceilometer-api ; then if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then curl -XDELETE "localhost:9200/events_*" fi fi } # cleanup_ceilometer() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_ceilometer { _ceilometer_cleanup_apache_wsgi _drop_database sudo rm -f "$CEILOMETER_CONF_DIR"/* sudo rmdir "$CEILOMETER_CONF_DIR" if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then sudo rm -f "$CEILOMETER_API_LOG_DIR"/* sudo rmdir "$CEILOMETER_API_LOG_DIR" fi } # Set configuration for cache backend. # NOTE(cdent): This currently only works for redis. Still working # out how to express the other backends. function _ceilometer_configure_cache_backend { iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND iniset $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL iniadd_literal $CEILOMETER_CONF cache backend_argument distributed_lock:True if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then iniadd_literal $CEILOMETER_CONF cache backend_argument db:0 iniadd_literal $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 fi } # Set configuration for storage backend. function _ceilometer_configure_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer) iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then # es is only supported for events. we will use sql for metering.
iniset $CEILOMETER_CONF database event_connection es://localhost:9200 iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) ${TOP_DIR}/pkg/elasticsearch.sh start elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then gnocchi_url=$(gnocchi_service_url) iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi # FIXME(sileht): We shouldn't load event_dispatchers if store_event is False iniset $CEILOMETER_CONF DEFAULT event_dispatchers "" iniset $CEILOMETER_CONF notification store_events False # NOTE(gordc): set higher retry in case gnocchi is started after ceilometer on a slow machine iniset $CEILOMETER_CONF storage max_retries 20 # NOTE(gordc): set batching to better handle recording on a slow machine iniset $CEILOMETER_CONF collector batch_size 50 iniset $CEILOMETER_CONF collector batch_timeout 5 iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" else iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" fi else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi _drop_database } # Configure Ceilometer function configure_ceilometer { local conffile iniset_rpc_backend ceilometer $CEILOMETER_CONF iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT verbose True iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL iniset $CEILOMETER_CONF compute workload_partitioning True iniset $CEILOMETER_CONF notification workload_partitioning True iniset $CEILOMETER_CONF notification workers $API_WORKERS fi if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then _ceilometer_configure_cache_backend fi # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. for conffile in policy.json api_paste.ini pipeline.yaml \ event_definitions.yaml event_pipeline.yaml \ gnocchi_resources.yaml; do cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR done iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml fi if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml fi fi # The compute and central agents need these credentials in order to # call out to other services' public APIs. 
iniset $CEILOMETER_CONF service_credentials auth_type password iniset $CEILOMETER_CONF service_credentials user_domain_id default iniset $CEILOMETER_CONF service_credentials project_domain_id default iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $CEILOMETER_CONF service_credentials username ceilometer iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS # Configure storage if is_service_enabled ceilometer-collector ceilometer-api; then _ceilometer_configure_storage_backend iniset $CEILOMETER_CONF collector workers $API_WORKERS fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" fi if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then iniset $CEILOMETER_CONF api pecan_debug "False" _ceilometer_config_apache_wsgi fi if is_service_enabled ceilometer-aipmi; then # Configure rootwrap for the ipmi agent configure_rootwrap ceilometer fi } # init_ceilometer() - Initialize etc. function init_ceilometer { # Get ceilometer keystone settings in place _ceilometer_create_accounts # Create cache dir sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* if is_service_enabled ceilometer-collector ceilometer-api && is_service_enabled mysql postgresql ; then if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then recreate_database ceilometer $CEILOMETER_BIN_DIR/ceilometer-dbsync fi fi } # Install Ceilometer. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. The context is not active during preinstall (when it would # otherwise make sense to set up the backend services).
function install_ceilometer { if is_service_enabled ceilometer-acentral ceilometer-anotification ceilometer-alarm-evaluator ; then _ceilometer_prepare_coordination fi if is_service_enabled ceilometer-collector ceilometer-api; then _ceilometer_prepare_storage_backend fi if is_service_enabled ceilometer-acompute ; then _ceilometer_prepare_virt_drivers fi install_ceilometerclient setup_develop $CEILOMETER_DIR sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then sudo install -d -o $STACK_USER -m 755 $CEILOMETER_API_LOG_DIR fi } # install_ceilometerclient() - Collect source and prepare function install_ceilometerclient { if use_library_from_git "python-ceilometerclient"; then git_clone_by_name "python-ceilometerclient" setup_dev_lib "python-ceilometerclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion else pip_install_gr python-ceilometerclient fi } # start_ceilometer() - Start running processes, including screen function start_ceilometer { run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" elif is_service_enabled ceilometer-api; then enable_apache_site ceilometer restart_apache_server tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log fi # run the collector after restarting apache as it needs # operational keystone if using gnocchi run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" # Start the compute agent late to allow time for the collector to # fully wake up and connect to the message bus. See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" fi # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT $(ceilometer_service_url)/v2/; then die $LINENO "ceilometer-api did not start" fi fi } # stop_ceilometer() - Stop running processes function stop_ceilometer { if is_service_enabled ceilometer-api ; then if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then disable_apache_site ceilometer restart_apache_server else stop_process ceilometer-api fi fi # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector; do stop_process $serv done } # This is the main for plugin.sh if is_service_enabled ceilometer; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Ceilometer" preinstall_ceilometer elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Ceilometer" # Use stack_install_service here to account for virtualenv stack_install_service ceilometer elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Ceilometer" configure_ceilometer elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Ceilometer" # Tidy base for ceilometer init_ceilometer # Start the services start_ceilometer fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Ceilometer" stop_ceilometer fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Ceilometer" cleanup_ceilometer fi fi # Restore xtrace $XTRACE ceilometer-6.1.5/devstack/settings0000664000567000056710000000427413072744706020420 0ustar jenkinsjenkins00000000000000# turn on all the ceilometer services by default # Pollsters enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi # Notification Agent enable_service ceilometer-anotification # Data Collector enable_service ceilometer-collector # API service enable_service ceilometer-api # Default directories CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} # Gnocchi default archive_policy for Ceilometer GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} # Ceilometer connection info. CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} # To enable OSprofiler change value of this variable to "notifications,profiler" CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True} CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379} CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} # Cache Options # NOTE(cdent): These are incomplete and specific for this testing.
CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis} CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379} CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} # Tell Tempest this project is present TEMPEST_SERVICES+=,ceilometer # Set up default directories for client and middleware GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ceilometer-6.1.5/devstack/files/0000775000567000056710000000000013072745164017733 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/devstack/files/rpms/0000775000567000056710000000000013072745164020714 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/devstack/files/rpms/ceilometer0000664000567000056710000000003013072744703022756 0ustar jenkinsjenkins00000000000000selinux-policy-targeted ceilometer-6.1.5/devstack/upgrade/0000775000567000056710000000000013072745164020260 5ustar jenkinsjenkins00000000000000ceilometer-6.1.5/devstack/upgrade/settings0000664000567000056710000000105413072744703022041 0ustar jenkinsjenkins00000000000000register_project_for_upgrade ceilometer devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest ceilometer-6.1.5/devstack/upgrade/shutdown.sh0000775000567000056710000000121113072744703022463 0ustar jenkinsjenkins00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Locate the ceilometer plugin and get its functions CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $CEILOMETER_DEVSTACK_DIR/plugin.sh set -o xtrace stop_ceilometer # ensure everything is stopped SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api" ensure_services_stopped $SERVICES_DOWN ceilometer-6.1.5/devstack/upgrade/upgrade.sh0000775000567000056710000000573513072744706022261 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # ``upgrade-ceilometer`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions 
source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Save mongodb state (replace with snapshot) # TODO(chdent): There used to be a 'register_db_to_save ceilometer' # which we may wish to consider putting back in. if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$BASE_RELEASE fi # Upgrade Ceilometer # ================== # Locate ceilometer devstack plugin, the directory above the # grenade plugin. CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/functions source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache # Get ceilometer functions from devstack plugin source $CEILOMETER_DEVSTACK_DIR/settings # Print the commands being run so that we can see the command that triggers # an error. set -o xtrace # Install the target ceilometer source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install # calls upgrade-ceilometer for specific release upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database # NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but # currently it is not. CEILOMETER_BIN_DIR=$(dirname $(which ceilometer-dbsync)) $CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error" # Start Ceilometer start_ceilometer # Note these are process names, not service names ensure_services_started "ceilometer-polling --polling-namespaces compute" \ "ceilometer-polling --polling-namespaces central" \ "ceilometer-polling --polling-namespaces ipmi" \ ceilometer-agent-notification \ ceilometer-api \ ceilometer-collector # Save mongodb state (replace with snapshot) if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$TARGET_RELEASE fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ceilometer-6.1.5/devstack/apache-ceilometer.template0000664000567000056710000000076213072744703023740 0ustar jenkinsjenkins00000000000000Listen %PORT% <VirtualHost *:%PORT%> WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup ceilometer-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/%APACHE_NAME%/ceilometer.log CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined </VirtualHost> WSGISocketPrefix /var/run/%APACHE_NAME% ceilometer-6.1.5/PKG-INFO0000664000567000056710000000207513072745164016126 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: ceilometer Version: 6.1.5 Summary: OpenStack Telemetry Home-page: http://docs.openstack.org/developer/ceilometer/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ceilometer ========== Release notes can be read online at: http://docs.openstack.org/developer/ceilometer/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/ceilometer/ The project home is at: http://launchpad.net/ceilometer Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier:
Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Topic :: System :: Monitoring ceilometer-6.1.5/.testr.conf0000664000567000056710000000074313072744703017115 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./ceilometer/tests} -t . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list # NOTE(chdent): Only used/matches on gabbi-related tests. group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_(?:prefix_|)[^_]+)_
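# For illustration (the test id below is hypothetical): an id such as
# gabbi.suitemaker.test_gabbi_api_get_meters matches the group_regex above
# with group "gabbi.suitemaker.test_gabbi_api", so all tests generated from
# the same gabbi YAML suite share one group and are scheduled to the same
# test runner, preserving their relative ordering.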